Disks.cc
1 /*
2  * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 47 Store Directory Routines */
10 
11 #include "squid.h"
12 #include "cache_cf.h"
13 #include "ConfigParser.h"
14 #include "debug/Messages.h"
15 #include "debug/Stream.h"
16 #include "globals.h"
17 #include "sbuf/Stream.h"
18 #include "SquidConfig.h"
19 #include "Store.h"
20 #include "store/Disk.h"
21 #include "store/Disks.h"
22 #include "store_rebuild.h"
23 #include "StoreFileSystem.h"
24 #include "swap_log_op.h"
25 #include "tools.h"
26 
27 typedef SwapDir *STDIRSELECT(const StoreEntry *e);
28 
29 static STDIRSELECT storeDirSelectSwapDirRoundRobin;
30 static STDIRSELECT storeDirSelectSwapDirLeastLoad;
35 static STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
36 
39 static int64_t
40 objectSizeForDirSelection(const StoreEntry &entry)
41 {
42  // entry.objectLen() is negative here when we are still STORE_PENDING
43  int64_t minSize = entry.mem_obj->expectedReplySize();
44 
45  // If entry size is unknown, use already accumulated bytes as an estimate.
46  // Controller::accumulateMore() guarantees that there are enough of them.
47  if (minSize < 0)
48  minSize = entry.mem_obj->endOffset();
49 
50  assert(minSize >= 0);
51  minSize += entry.mem_obj->swap_hdr_sz;
52  return minSize;
53 }
54 
56 static SwapDir &
57 SwapDirByIndex(const int i)
58 {
59  assert(i >= 0);
60  assert(i < Config.cacheSwap.n_configured);
61  const auto sd = INDEXSD(i);
62  assert(sd);
63  return *sd;
64 }
65 
71 static SwapDir *
72 storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
73 {
74  const int64_t objsize = objectSizeForDirSelection(*e);
75 
76  // Increment the first candidate once per selection (not once per
77  // iteration) to reduce bias when some disk(s) attract more entries.
78  static int firstCandidate = 0;
79  if (++firstCandidate >= Config.cacheSwap.n_configured)
80  firstCandidate = 0;
81 
82  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
83  const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
84  auto &dir = SwapDirByIndex(dirn);
85 
86  int load = 0;
87  if (!dir.canStore(*e, objsize, load))
88  continue;
89 
90  if (load < 0 || load > 1000) {
91  continue;
92  }
93 
94  return &dir;
95  }
96 
97  return nullptr;
98 }
99 
113 static SwapDir *
114 storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
115 {
116  int64_t most_free = 0;
117  int64_t best_objsize = -1;
118  int least_load = INT_MAX;
119  int load;
120  SwapDir *selectedDir = nullptr;
121  int i;
122 
123  const int64_t objsize = objectSizeForDirSelection(*e);
124 
125  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
126  auto &sd = SwapDirByIndex(i);
127  sd.flags.selected = false;
128 
129  if (!sd.canStore(*e, objsize, load))
130  continue;
131 
132  if (load < 0 || load > 1000)
133  continue;
134 
135  if (load > least_load)
136  continue;
137 
138  const int64_t cur_free = sd.maxSize() - sd.currentSize();
139 
140  /* If the load is equal, then look in more details */
141  if (load == least_load) {
142  /* best max-size fit */
143  if (best_objsize != -1) {
144  // cache_dir with the smallest max-size gets the known-size object
145  // cache_dir with the largest max-size gets the unknown-size object
146  if ((objsize != -1 && sd.maxObjectSize() > best_objsize) ||
147  (objsize == -1 && sd.maxObjectSize() < best_objsize))
148  continue;
149  }
150 
151  /* most free */
152  if (cur_free < most_free)
153  continue;
154  }
155 
156  least_load = load;
157  best_objsize = sd.maxObjectSize();
158  most_free = cur_free;
159  selectedDir = &sd;
160  }
161 
162  if (selectedDir)
163  selectedDir->flags.selected = true;
164 
165  return selectedDir;
166 }
167 
168 Store::Disks::Disks():
169  largestMinimumObjectSize(-1),
170  largestMaximumObjectSize(-1),
171  secondLargestMaximumObjectSize(-1)
172 {
173 }
174 
175 SwapDir *
176 Store::Disks::store(int const x) const
177 {
178  return &SwapDirByIndex(x);
179 }
180 
181 SwapDir &
182 Store::Disks::Dir(const int i)
183 {
184  return SwapDirByIndex(i);
185 }
186 
187 int
188 Store::Disks::callback()
189 {
190  int result = 0;
191  int j;
192  static int ndir = 0;
193 
194  do {
195  j = 0;
196 
197  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
198  if (ndir >= Config.cacheSwap.n_configured)
199  ndir = ndir % Config.cacheSwap.n_configured;
200 
201  int temp_result = store(ndir)->callback();
202 
203  ++ndir;
204 
205  j += temp_result;
206 
207  result += temp_result;
208 
209  if (j > 100)
210  fatal ("too much io\n");
211  }
212  } while (j > 0);
213 
214  ++ndir;
215 
216  return result;
217 }
218 
219 void
220 Store::Disks::create()
221 {
222  if (Config.cacheSwap.n_configured == 0) {
223  debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
224  }
225 
226  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
227  if (Dir(i).active())
228  store(i)->create();
229  }
230 }
231 
232 StoreEntry *
233 Store::Disks::get(const cache_key *key)
234 {
235  if (const int cacheDirs = Config.cacheSwap.n_configured) {
236  // ask each cache_dir until the entry is found; use static starting
237  // point to avoid asking the same subset of disks more often
238  // TODO: coordinate with put() to be able to guess the right disk often
239  static int idx = 0;
240  for (int n = 0; n < cacheDirs; ++n) {
241  idx = (idx + 1) % cacheDirs;
242  auto &sd = Dir(idx);
243  if (!sd.active())
244  continue;
245 
246  if (auto e = sd.get(key)) {
247  debugs(20, 7, "cache_dir " << idx << " has: " << *e);
248  return e;
249  }
250  }
251  }
252 
253  debugs(20, 6, "none of " << Config.cacheSwap.n_configured <<
254  " cache_dirs have " << storeKeyText(key));
255  return nullptr;
256 }
257 
258 void
259 Store::Disks::init()
260 {
261  if (Config.Store.objectsPerBucket <= 0)
262  fatal("'store_objects_per_bucket' should be larger than 0.");
263 
264  if (Config.Store.avgObjectSize <= 0)
265  fatal("'store_avg_object_size' should be larger than 0.");
266 
267  /* Calculate size of hash table (maximum currently 64k buckets). */
268 /* this is very bogus, it's specific to any Store maintaining an
269  * in-core index, not global */
270  size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
271  debugs(20, Important(31), "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
272  " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
273  buckets /= Config.Store.objectsPerBucket;
274  debugs(20, Important(32), "Target number of buckets: " << buckets);
275  /* ideally the full scan period should be configurable, for the
276  * moment it remains at approximately 24 hours. */
277  store_hash_buckets = storeKeyHashBuckets(buckets);
278  debugs(20, Important(33), "Using " << store_hash_buckets << " Store buckets");
279  debugs(20, Important(34), "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
280  (Config.memShared ? " [shared]" : ""));
281  debugs(20, Important(35), "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");
282 
283  store_table = hash_create(storeKeyHashCmp,
284  store_hash_buckets, storeKeyHashHash);
285 
286  // Increment _before_ any possible storeRebuildComplete() calls so that
287  // storeRebuildComplete() can reliably detect when all disks are done. The
288  // level is decremented in each corresponding storeRebuildComplete() call.
289  ++StoreController::store_dirs_rebuilding;
290 
291  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
292  /* this starts a search of the store dirs, loading their
293  * index. under the new Store api this should be
294  * driven by the StoreHashIndex, not by each store.
295  *
296  * That is, the HashIndex should perform a search of each dir it is
297  * indexing to do the hash insertions. The search is then able to
298  * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
299  * 'from-no-log'.
300  *
301  * Step 1: make the store rebuilds use a search internally
302  * Step 2: change the search logic to use the four modes described
303  * above
304  * Step 3: have the hash index walk the searches itself.
305  */
306  if (Dir(i).active())
307  store(i)->init();
308  else
309  storeRebuildComplete(nullptr);
310  }
311 
312  if (strcasecmp(Config.store_dir_select_algorithm, "round-robin") == 0) {
313  storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
314  debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
315  } else {
316  storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
317  debugs(47, Important(36), "Using Least Load store dir selection");
318  }
319 }
320 
321 uint64_t
322 Store::Disks::maxSize() const
323 {
324  uint64_t result = 0;
325 
326  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
327  if (Dir(i).doReportStat())
328  result += store(i)->maxSize();
329  }
330 
331  return result;
332 }
333 
334 uint64_t
335 Store::Disks::minSize() const
336 {
337  uint64_t result = 0;
338 
339  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
340  if (Dir(i).doReportStat())
341  result += store(i)->minSize();
342  }
343 
344  return result;
345 }
346 
347 uint64_t
348 Store::Disks::currentSize() const
349 {
350  uint64_t result = 0;
351 
352  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
353  if (Dir(i).doReportStat())
354  result += store(i)->currentSize();
355  }
356 
357  return result;
358 }
359 
360 uint64_t
361 Store::Disks::currentCount() const
362 {
363  uint64_t result = 0;
364 
365  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
366  if (Dir(i).doReportStat())
367  result += store(i)->currentCount();
368  }
369 
370  return result;
371 }
372 
373 int64_t
374 Store::Disks::maxObjectSize() const
375 {
376  return largestMaximumObjectSize;
377 }
378 
379 void
380 Store::Disks::configure()
381 {
382  if (!Config.cacheSwap.swapDirs)
383  Controller::store_dirs_rebuilding = 0; // nothing to index
384 
385  largestMinimumObjectSize = -1;
386  largestMaximumObjectSize = -1;
387  secondLargestMaximumObjectSize = -1;
388 
389  Config.cacheSwap.n_strands = 0;
390 
391  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
392  auto &disk = Dir(i);
393  if (disk.needsDiskStrand()) {
394  assert(InDaemonMode());
395  // XXX: Do not pretend to support disk.disker changes during reconfiguration
396  disk.disker = Config.workers + (++Config.cacheSwap.n_strands);
397  }
398 
399  if (!disk.active())
400  continue;
401 
402  if (disk.minObjectSize() > largestMinimumObjectSize)
403  largestMinimumObjectSize = disk.minObjectSize();
404 
405  const auto diskMaxObjectSize = disk.maxObjectSize();
406  if (diskMaxObjectSize > largestMaximumObjectSize) {
407  if (largestMaximumObjectSize >= 0) // was set
408  secondLargestMaximumObjectSize = largestMaximumObjectSize;
409  largestMaximumObjectSize = diskMaxObjectSize;
410  }
411  }
412 }
413 
414 void
415 Store::Disks::Parse(DiskConfig &swap)
416 {
417  const auto typeStr = ConfigParser::NextToken();
418  if (!typeStr)
419  throw TextException("missing cache_dir parameter: storage type", Here());
420 
421  const auto pathStr = ConfigParser::NextToken();
422  if (!pathStr)
423  throw TextException("missing cache_dir parameter: directory name", Here());
424 
425  const auto fs = StoreFileSystem::FindByType(typeStr);
426  if (!fs) {
427  debugs(3, DBG_PARSE_NOTE(DBG_IMPORTANT), "ERROR: This proxy does not support the '" << typeStr << "' cache type. Ignoring.");
428  return;
429  }
430 
431  const auto fsType = fs->type();
432 
433  // check for the existing cache_dir
434  // XXX: This code mistreats duplicated cache_dir entries (that should be fatal).
435  for (int i = 0; i < swap.n_configured; ++i) {
436  auto &disk = Dir(i);
437  if ((strcasecmp(pathStr, disk.path)) == 0) {
438  /* this is specific to on-fs Stores. The right
439  * way to handle this is probably to have a mapping
440  * from paths to stores, and have on-fs stores
441  * register with that, and lookup in that in their
442  * own setup logic. RBC 20041225. TODO.
443  */
444 
445  if (strcmp(disk.type(), fsType) == 0)
446  disk.reconfigure();
447  else
448  debugs(3, DBG_CRITICAL, "ERROR: Can't change type of existing cache_dir " <<
449  disk.type() << " " << disk.path << " to " << fsType << ". Restart required");
450 
451  return;
452  }
453  }
454 
455  const int cacheDirCountLimit = 64; // StoreEntry::swap_dirn is a signed 7-bit integer
456  if (swap.n_configured >= cacheDirCountLimit)
457  throw TextException(ToSBuf("Squid cannot handle more than ", cacheDirCountLimit, " cache_dir directives"), Here());
458 
459  // create a new cache_dir
460  allocate_new_swapdir(swap);
461  swap.swapDirs[swap.n_configured] = fs->createSwapDir();
462  auto &disk = Dir(swap.n_configured);
463  disk.parse(swap.n_configured, pathStr);
464  ++swap.n_configured;
465 }
466 
467 void
468 Store::Disks::Dump(const DiskConfig &swap, StoreEntry &entry, const char *name)
469 {
470  for (int i = 0; i < swap.n_configured; ++i) {
471  const auto &disk = Dir(i);
472  storeAppendPrintf(&entry, "%s %s %s", name, disk.type(), disk.path);
473  disk.dump(entry);
474  storeAppendPrintf(&entry, "\n");
475  }
476 }
477 
478 int64_t
479 Store::Disks::accumulateMore(const StoreEntry &entry) const
480 {
481  const auto accumulated = entry.mem_obj->availableForSwapOut();
482 
483  /*
484  * Keep accumulating more bytes until the set of disks eligible to accept
485  * the entry becomes stable, and, hence, accumulating more is not going to
486  * affect the cache_dir selection. A stable set is usually reached
487  * immediately (or soon) because most configurations either do not use
488  * cache_dirs with explicit min-size/max-size limits or use the same
489  * max-size limit for all cache_dirs (and low min-size limits).
490  */
491 
492  // Can the set of min-size cache_dirs accepting this entry change?
493  if (accumulated < largestMinimumObjectSize)
494  return largestMinimumObjectSize - accumulated;
495 
496  // Can the set of max-size cache_dirs accepting this entry change
497  // (other than when the entry exceeds the largest maximum; see below)?
498  if (accumulated <= secondLargestMaximumObjectSize)
499  return secondLargestMaximumObjectSize - accumulated + 1;
500 
501  /*
502  * Checking largestMaximumObjectSize instead eliminates the risk of starting
503  * to swap out an entry that later grows too big, but also implies huge
504  * accumulation in most environments. Accumulating huge entries not only
505  * consumes lots of RAM but also creates a burst of doPages() write requests
506  * that overwhelm the disk. To avoid these problems, we take the risk and
507  * allow swap out now. The disk will quit swapping out if the entry
508  * eventually grows too big for its selected cache_dir.
509  */
510  debugs(20, 3, "no: " << accumulated << '>' <<
511  secondLargestMaximumObjectSize << ',' << largestMinimumObjectSize);
512  return 0;
513 }
514 
515 void
516 Store::Disks::getStats(StoreInfoStats &stats) const
517 {
518  // accumulate per-disk cache stats
519  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
520  StoreInfoStats dirStats;
521  store(i)->getStats(dirStats);
522  stats += dirStats;
523  }
524 
525  // common to all disks
526  stats.swap.open_disk_fd = store_open_disk_fd;
527 
528  // memory cache stats are collected in StoreController::getStats(), for now
529 }
530 
531 void
532 Store::Disks::stat(StoreEntry &output) const
533 {
534  int i;
535 
536  /* Now go through each store, calling its stat routine */
537 
538  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
539  storeAppendPrintf(&output, "\n");
540  store(i)->stat(output);
541  }
542 }
543 
544 void
545 Store::Disks::reference(StoreEntry &e)
546 {
547  e.disk().reference(e);
548 }
549 
550 bool
551 Store::Disks::dereference(StoreEntry &e)
552 {
553  return e.disk().dereference(e);
554 }
555 
556 void
557 Store::Disks::updateHeaders(StoreEntry *e)
558 {
559  Must(e);
560  return e->disk().updateHeaders(e);
561 }
562 
563 void
564 Store::Disks::maintain()
565 {
566  int i;
567  /* walk each fs */
568 
569  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
570  /* XXX FixMe: This should be done "in parallel" on the different
571  * cache_dirs, not one at a time.
572  */
573  /* call the maintain function .. */
574  store(i)->maintain();
575  }
576 }
577 
578 void
579 Store::Disks::sync()
580 {
581  for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
582  store(i)->sync();
583 }
584 
585 void
586 Store::Disks::evictCached(StoreEntry &e) {
587  if (e.hasDisk()) {
588  // TODO: move into Fs::Ufs::UFSSwapDir::evictCached()
589  if (!EBIT_TEST(e.flags, KEY_PRIVATE)) {
590  // log before evictCached() below may clear hasDisk()
591  storeDirSwapLog(&e, SWAP_LOG_DEL);
592  }
593 
594  e.disk().evictCached(e);
595  return;
596  }
597 
598  if (const auto key = e.publicKey())
599  evictIfFound(key);
600 }
601 
602 void
603 Store::Disks::evictIfFound(const cache_key *key)
604 {
605  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
606  if (Dir(i).active())
607  Dir(i).evictIfFound(key);
608  }
609 }
610 
611 bool
612 Store::Disks::anchorToCache(StoreEntry &entry, bool &inSync)
613 {
614  if (const int cacheDirs = Config.cacheSwap.n_configured) {
615  // ask each cache_dir until the entry is found; use static starting
616  // point to avoid asking the same subset of disks more often
617  // TODO: coordinate with put() to be able to guess the right disk often
618  static int idx = 0;
619  for (int n = 0; n < cacheDirs; ++n) {
620  idx = (idx + 1) % cacheDirs;
621  SwapDir &sd = Dir(idx);
622  if (!sd.active())
623  continue;
624 
625  if (sd.anchorToCache(entry, inSync)) {
626  debugs(20, 3, "cache_dir " << idx << " anchors " << entry);
627  return true;
628  }
629  }
630  }
631 
632  debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
633  " cache_dirs have " << entry);
634  return false;
635 }
636 
637 bool
638 Store::Disks::updateAnchored(StoreEntry &entry)
639 {
640  return entry.hasDisk() &&
641  entry.disk().updateAnchored(entry);
642 }
643 
644 bool
645 Store::Disks::SmpAware()
646 {
647  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
648  // A mix is not supported, but we conservatively check every
649  // dir because features like collapsed revalidation should
650  // currently be disabled if any dir is SMP-aware
651  if (Dir(i).smpAware())
652  return true;
653  }
654  return false;
655 }
656 
657 SwapDir *
658 Store::Disks::SelectSwapDir(const StoreEntry *e)
659 {
660  return storeDirSelectSwapDir(e);
661 }
662 
663 bool
664 Store::Disks::hasReadableEntry(const StoreEntry &e) const
665 {
666  for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
667  if (Dir(i).active() && Dir(i).hasReadableEntry(e))
668  return true;
669  return false;
670 }
671 
672 void
673 storeDirOpenSwapLogs()
674 {
675  for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
676  SwapDirByIndex(dirn).openLog();
677 }
678 
679 void
680 storeDirCloseSwapLogs()
681 {
682  for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
683  SwapDirByIndex(dirn).closeLog();
684 }
685 
695 int
696 storeDirWriteCleanLogs(int reopen)
697 {
698  const StoreEntry *e = NULL;
699  int n = 0;
700 
701  struct timeval start;
702  double dt;
703  int dirn;
704  int notdone = 1;
705 
706  // Check for store_dirs_rebuilding because fatal() often calls us in early
707  // initialization phases, before store log is initialized and ready. Also,
708  // some stores do not support log cleanup during Store rebuilding.
709  if (StoreController::store_dirs_rebuilding) {
710  debugs(20, Important(37), "Not currently OK to rewrite swap log.");
711  debugs(20, Important(38), "storeDirWriteCleanLogs: Operation aborted.");
712  return 0;
713  }
714 
715  debugs(20, Important(39), "storeDirWriteCleanLogs: Starting...");
716  getCurrentTime();
717  start = current_time;
718 
719  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
720  auto &sd = SwapDirByIndex(dirn);
721 
722  if (sd.writeCleanStart() < 0) {
723  debugs(20, DBG_IMPORTANT, "ERROR: log.clean.start() failed for dir #" << sd.index);
724  continue;
725  }
726  }
727 
728  /*
729  * This may look inefficient as CPU wise it is more efficient to do this
730  * sequentially, but I/O wise the parallelism helps as it allows more
731  * hdd spindles to be active.
732  */
733  while (notdone) {
734  notdone = 0;
735 
736  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
737  auto &sd = SwapDirByIndex(dirn);
738 
739  if (!sd.cleanLog)
740  continue;
741 
742  e = sd.cleanLog->nextEntry();
743 
744  if (!e)
745  continue;
746 
747  notdone = 1;
748 
749  if (!sd.canLog(*e))
750  continue;
751 
752  sd.cleanLog->write(*e);
753 
754  if ((++n & 0xFFFF) == 0) {
755  getCurrentTime();
756  debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
757  " entries written so far.");
758  }
759  }
760  }
761 
762  /* Flush */
763  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
764  SwapDirByIndex(dirn).writeCleanDone();
765 
766  if (reopen)
767  storeDirOpenSwapLogs();
768 
769  getCurrentTime();
770 
771  dt = tvSubDsec(start, current_time);
772 
773  debugs(20, Important(40), " Finished. Wrote " << n << " entries.");
774  debugs(20, Important(41), " Took "<< std::setw(3) << std::setprecision(2) << dt <<
775  " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");
776 
777  return n;
778 }
779 
780 /* Globals that should be converted to static Store::Disks methods */
781 
782 void
783 allocate_new_swapdir(Store::DiskConfig &swap)
784 {
785  if (!swap.swapDirs) {
786  swap.n_allocated = 4;
787  swap.swapDirs = new SwapDir::Pointer[swap.n_allocated];
788  }
789 
790  if (swap.n_allocated == swap.n_configured) {
791  swap.n_allocated <<= 1;
792  const auto tmp = new SwapDir::Pointer[swap.n_allocated];
793  for (int i = 0; i < swap.n_configured; ++i) {
794  tmp[i] = swap.swapDirs[i];
795  }
796  delete[] swap.swapDirs;
797  swap.swapDirs = tmp;
798  }
799 }
800 
801 void
802 free_cachedir(Store::DiskConfig *swap)
803 {
804  /* DON'T FREE THESE FOR RECONFIGURE */
805 
806  if (reconfiguring)
807  return;
808 
809  /* TODO XXX this lets the swapdir free resources asynchronously
810  * swap->swapDirs[i]->deactivate();
811  * but there may be such a means already.
812  * RBC 20041225
813  */
814 
815  // only free's the array memory itself
816  // the SwapDir objects may remain (ref-counted)
817  delete[] swap->swapDirs;
818  swap->swapDirs = nullptr;
819  swap->n_allocated = 0;
820  swap->n_configured = 0;
821 }
822 
823 /* Globals that should be moved to some Store::UFS-specific logging module */
824 
834 void
835 storeDirSwapLog(const StoreEntry * e, int op)
836 {
837  assert (e);
838  assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
839  assert(e->hasDisk());
840  /*
841  * icons and such; don't write them to the swap log
842  */
843 
844  if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
845  return;
846 
847  assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);
848 
849  debugs(20, 3, "storeDirSwapLog: " <<
850  swap_log_op_str[op] << " " <<
851  e->getMD5Text() << " " <<
852  e->swap_dirn << " " <<
853  std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);
854 
855  e->disk().logEntry(*e, op);
856 }
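The round-robin policy implemented by storeDirSelectSwapDirRoundRobin() above is easier to study in isolation. Below is a minimal, self-contained sketch of the same idea, assuming a hypothetical FakeDir type with a canStore() check and a 0..1000 load value; none of these names are Squid APIs. The starting candidate advances once per selection rather than once per loop iteration, which reduces bias toward whichever directory happens to be listed first.

#include <cstdint>
#include <vector>

// Hypothetical stand-in for a cache_dir; not a Squid type.
struct FakeDir {
    int64_t maxObjectSize = 0; // largest object this directory accepts
    int load = 0;              // 0..1000, mirroring the load output of canStore()
    bool canStore(int64_t objSize) const { return objSize <= maxObjectSize; }
};

// Round-robin selection: advance the first candidate once per call so that
// consecutive objects start probing at different directories.
FakeDir *selectRoundRobin(std::vector<FakeDir> &dirs, int64_t objSize)
{
    if (dirs.empty())
        return nullptr;

    static size_t firstCandidate = 0;
    if (++firstCandidate >= dirs.size())
        firstCandidate = 0;

    for (size_t i = 0; i < dirs.size(); ++i) {
        auto &dir = dirs[(firstCandidate + i) % dirs.size()];
        if (!dir.canStore(objSize))
            continue; // directory refuses this object
        if (dir.load < 0 || dir.load > 1000)
            continue; // implausible load value; skip, as the listing above does
        return &dir;
    }
    return nullptr; // no eligible cache_dir
}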
857 
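Similarly, the accumulation thresholds in Store::Disks::accumulateMore() reduce to a small decision function: keep buffering while the accumulated bytes are below the largest min-size limit, buffer just past the second-largest max-size limit so the set of eligible max-size directories stops changing, and otherwise start swapping out. The following sketch restates that three-way decision with plain integers; the parameter names mirror the members above, but nothing here is Squid API.

#include <cstdint>

// How many more bytes to accumulate before cache_dir selection becomes stable,
// following the same three-way decision as accumulateMore() in the listing above.
int64_t bytesStillNeeded(int64_t accumulated,
                         int64_t largestMinimumObjectSize,
                         int64_t secondLargestMaximumObjectSize)
{
    // The set of cache_dirs whose min-size limit admits this entry may still change.
    if (accumulated < largestMinimumObjectSize)
        return largestMinimumObjectSize - accumulated;

    // The set of cache_dirs whose max-size limit admits this entry may still change
    // (except for exceeding the single largest maximum, a risk taken deliberately).
    if (accumulated <= secondLargestMaximumObjectSize)
        return secondLargestMaximumObjectSize - accumulated + 1;

    return 0; // the eligible set is stable; start swapping out now
}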