Disks.cc
Go to the documentation of this file.
1 /*
2  * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 47 Store Directory Routines */
10 
11 #include "squid.h"
12 #include "base/IoManip.h"
13 #include "cache_cf.h"
14 #include "ConfigParser.h"
15 #include "debug/Messages.h"
16 #include "debug/Stream.h"
17 #include "globals.h"
18 #include "sbuf/Stream.h"
19 #include "SquidConfig.h"
20 #include "Store.h"
21 #include "store/Disk.h"
22 #include "store/Disks.h"
23 #include "store_rebuild.h"
24 #include "StoreFileSystem.h"
25 #include "swap_log_op.h"
26 #include "tools.h"
27 
28 typedef SwapDir *STDIRSELECT(const StoreEntry *e);
29 
37 
40 static int64_t
42 {
43  // entry.objectLen() is negative here when we are still STORE_PENDING
44  int64_t minSize = entry.mem_obj->expectedReplySize();
45 
46  // If entry size is unknown, use already accumulated bytes as an estimate.
47  // Controller::accumulateMore() guarantees that there are enough of them.
48  if (minSize < 0)
49  minSize = entry.mem_obj->endOffset();
50 
51  assert(minSize >= 0);
52  minSize += entry.mem_obj->swap_hdr_sz;
53  return minSize;
54 }
55 
57 static SwapDir &
58 SwapDirByIndex(const size_t i)
59 {
61  const auto sd = INDEXSD(i);
62  assert(sd);
63  return *sd;
64 }
65 
71 static SwapDir *
73 {
74  const int64_t objsize = objectSizeForDirSelection(*e);
75 
76  // Increment the first candidate once per selection (not once per
77  // iteration) to reduce bias when some disk(s) attract more entries.
78  static size_t firstCandidate = 0;
79  if (++firstCandidate >= Config.cacheSwap.n_configured)
80  firstCandidate = 0;
81 
82  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
83  const auto dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
84  auto &dir = SwapDirByIndex(dirn);
85 
86  int load = 0;
87  if (!dir.canStore(*e, objsize, load))
88  continue;
89 
90  if (load < 0 || load > 1000) {
91  continue;
92  }
93 
94  return &dir;
95  }
96 
97  return nullptr;
98 }
99 
113 static SwapDir *
115 {
116  int64_t most_free = 0;
117  int64_t best_objsize = -1;
118  int least_load = INT_MAX;
119  int load;
120  SwapDir *selectedDir = nullptr;
121 
122  const int64_t objsize = objectSizeForDirSelection(*e);
123 
124  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
125  auto &sd = SwapDirByIndex(i);
126  sd.flags.selected = false;
127 
128  if (!sd.canStore(*e, objsize, load))
129  continue;
130 
131  if (load < 0 || load > 1000)
132  continue;
133 
134  if (load > least_load)
135  continue;
136 
137  const int64_t cur_free = sd.maxSize() - sd.currentSize();
138 
139  /* If the load is equal, then look in more details */
140  if (load == least_load) {
141  /* best max-size fit */
142  if (best_objsize != -1) {
143  // cache_dir with the smallest max-size gets the known-size object
144  // cache_dir with the largest max-size gets the unknown-size object
145  if ((objsize != -1 && sd.maxObjectSize() > best_objsize) ||
146  (objsize == -1 && sd.maxObjectSize() < best_objsize))
147  continue;
148  }
149 
150  /* most free */
151  if (cur_free < most_free)
152  continue;
153  }
154 
155  least_load = load;
156  best_objsize = sd.maxObjectSize();
157  most_free = cur_free;
158  selectedDir = &sd;
159  }
160 
161  if (selectedDir)
162  selectedDir->flags.selected = true;
163 
164  return selectedDir;
165 }
166 
168  largestMinimumObjectSize(-1),
169  largestMaximumObjectSize(-1),
170  secondLargestMaximumObjectSize(-1)
171 {
172 }
173 
174 SwapDir *
175 Store::Disks::store(const size_t x) const
176 {
177  return &SwapDirByIndex(x);
178 }
179 
180 SwapDir &
181 Store::Disks::Dir(const size_t i)
182 {
183  return SwapDirByIndex(i);
184 }
185 
186 int
188 {
189  int result = 0;
190  int j;
191  static size_t ndir = 0;
192 
193  do {
194  j = 0;
195 
196  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
197  if (ndir >= Config.cacheSwap.n_configured)
198  ndir = ndir % Config.cacheSwap.n_configured;
199 
200  int temp_result = store(ndir)->callback();
201 
202  ++ndir;
203 
204  j += temp_result;
205 
206  result += temp_result;
207 
208  if (j > 100)
209  fatal ("too much io\n");
210  }
211  } while (j > 0);
212 
213  ++ndir;
214 
215  return result;
216 }
217 
218 void
220 {
221  if (Config.cacheSwap.n_configured == 0) {
222  debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
223  }
224 
225  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
226  if (Dir(i).active())
227  store(i)->create();
228  }
229 }
230 
231 StoreEntry *
233 {
234  if (const auto cacheDirs = Config.cacheSwap.n_configured) {
235  // ask each cache_dir until the entry is found; use static starting
236  // point to avoid asking the same subset of disks more often
237  // TODO: coordinate with put() to be able to guess the right disk often
238  static size_t idx = 0;
239  for (size_t n = 0; n < cacheDirs; ++n) {
240  idx = (idx + 1) % cacheDirs;
241  auto &sd = Dir(idx);
242  if (!sd.active())
243  continue;
244 
245  if (auto e = sd.get(key)) {
246  debugs(20, 7, "cache_dir " << idx << " has: " << *e);
247  return e;
248  }
249  }
250  }
251 
252  debugs(20, 6, "none of " << Config.cacheSwap.n_configured <<
253  " cache_dirs have " << storeKeyText(key));
254  return nullptr;
255 }
256 
257 void
259 {
260  if (Config.Store.objectsPerBucket <= 0)
261  fatal("'store_objects_per_bucket' should be larger than 0.");
262 
263  if (Config.Store.avgObjectSize <= 0)
264  fatal("'store_avg_object_size' should be larger than 0.");
265 
266  /* Calculate size of hash table (maximum currently 64k buckets). */
267  /* this is very bogus, it's specific to any Store maintaining an
268  * in-core index, not global */
270  debugs(20, Important(31), "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
271  " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
273  debugs(20, Important(32), "Target number of buckets: " << buckets);
274  /* ideally the full scan period should be configurable, for the
275  * moment it remains at approximately 24 hours. */
277  debugs(20, Important(33), "Using " << store_hash_buckets << " Store buckets");
278  debugs(20, Important(34), "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
279  (Config.memShared ? " [shared]" : ""));
280  debugs(20, Important(35), "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");
281 
284 
285  // Increment _before_ any possible storeRebuildComplete() calls so that
286  // storeRebuildComplete() can reliably detect when all disks are done. The
287  // level is decremented in each corresponding storeRebuildComplete() call.
289 
290  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
291  /* this starts a search of the store dirs, loading their
292  * index. under the new Store api this should be
293  * driven by the StoreHashIndex, not by each store.
294  *
295  * That is, the HashIndex should perform a search of each dir it is
296  * indexing to do the hash insertions. The search is then able to
297  * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
298  * 'from-no-log'.
299  *
300  * Step 1: make the store rebuilds use a search internally
301  * Step 2: change the search logic to use the four modes described
302  * above
303  * Step 3: have the hash index walk the searches itself.
304  */
305  if (Dir(i).active())
306  store(i)->init();
307  else
308  storeRebuildComplete(nullptr);
309  }
310 
311  if (strcasecmp(Config.store_dir_select_algorithm, "round-robin") == 0) {
313  debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
314  } else {
316  debugs(47, Important(36), "Using Least Load store dir selection");
317  }
318 }
319 
320 uint64_t
322 {
323  uint64_t result = 0;
324 
325  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
326  if (Dir(i).doReportStat())
327  result += store(i)->maxSize();
328  }
329 
330  return result;
331 }
332 
333 uint64_t
335 {
336  uint64_t result = 0;
337 
338  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
339  if (Dir(i).doReportStat())
340  result += store(i)->minSize();
341  }
342 
343  return result;
344 }
345 
346 uint64_t
348 {
349  uint64_t result = 0;
350 
351  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
352  if (Dir(i).doReportStat())
353  result += store(i)->currentSize();
354  }
355 
356  return result;
357 }
358 
359 uint64_t
361 {
362  uint64_t result = 0;
363 
364  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
365  if (Dir(i).doReportStat())
366  result += store(i)->currentCount();
367  }
368 
369  return result;
370 }
371 
372 int64_t
374 {
375  return largestMaximumObjectSize;
376 }
377 
378 void
380 {
382  Controller::store_dirs_rebuilding = 0; // nothing to index
383 
384  largestMinimumObjectSize = -1;
385  largestMaximumObjectSize = -1;
386  secondLargestMaximumObjectSize = -1;
387 
389 
390  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
391  auto &disk = Dir(i);
392  if (disk.needsDiskStrand()) {
393  assert(InDaemonMode());
394  // XXX: Do not pretend to support disk.disker changes during reconfiguration
395  disk.disker = Config.workers + (++Config.cacheSwap.n_strands);
396  }
397 
398  if (!disk.active())
399  continue;
400 
401  if (disk.minObjectSize() > largestMinimumObjectSize)
402  largestMinimumObjectSize = disk.minObjectSize();
403 
404  const auto diskMaxObjectSize = disk.maxObjectSize();
405  if (diskMaxObjectSize > largestMaximumObjectSize) {
406  if (largestMaximumObjectSize >= 0) // was set
407  secondLargestMaximumObjectSize = largestMaximumObjectSize;
408  largestMaximumObjectSize = diskMaxObjectSize;
409  }
410  }
411 }
412 
413 void
415 {
416  const auto typeStr = ConfigParser::NextToken();
417  if (!typeStr)
418  throw TextException("missing cache_dir parameter: storage type", Here());
419 
420  const auto pathStr = ConfigParser::NextToken();
421  if (!pathStr)
422  throw TextException("missing cache_dir parameter: directory name", Here());
423 
424  const auto fs = StoreFileSystem::FindByType(typeStr);
425  if (!fs) {
426  debugs(3, DBG_PARSE_NOTE(DBG_IMPORTANT), "ERROR: This proxy does not support the '" << typeStr << "' cache type. Ignoring.");
427  return;
428  }
429 
430  const auto fsType = fs->type();
431 
432  // check for the existing cache_dir
433  // XXX: This code mistreats duplicated cache_dir entries (that should be fatal).
434  for (size_t i = 0; i < swap.n_configured; ++i) {
435  auto &disk = Dir(i);
436  if ((strcasecmp(pathStr, disk.path)) == 0) {
437  /* this is specific to on-fs Stores. The right
438  * way to handle this is probably to have a mapping
439  * from paths to stores, and have on-fs stores
440  * register with that, and lookup in that in their
441  * own setup logic. RBC 20041225. TODO.
442  */
443 
444  if (strcmp(disk.type(), fsType) == 0)
445  disk.reconfigure();
446  else
447  debugs(3, DBG_CRITICAL, "ERROR: Can't change type of existing cache_dir " <<
448  disk.type() << " " << disk.path << " to " << fsType << ". Restart required");
449 
450  return;
451  }
452  }
453 
454  const size_t cacheDirCountLimit = 64; // StoreEntry::swap_dirn is a signed 7-bit integer
455  if (swap.n_configured >= cacheDirCountLimit)
456  throw TextException(ToSBuf("Squid cannot handle more than ", cacheDirCountLimit, " cache_dir directives"), Here());
457 
458  // create a new cache_dir
459  allocate_new_swapdir(swap);
460  swap.swapDirs[swap.n_configured] = fs->createSwapDir();
461  auto &disk = Dir(swap.n_configured);
462  disk.parse(swap.n_configured, pathStr);
463  ++swap.n_configured;
464 }
465 
466 void
467 Store::Disks::Dump(const DiskConfig &swap, StoreEntry &entry, const char *name)
468 {
469  for (size_t i = 0; i < swap.n_configured; ++i) {
470  const auto &disk = Dir(i);
471  storeAppendPrintf(&entry, "%s %s %s", name, disk.type(), disk.path);
472  disk.dump(entry);
473  storeAppendPrintf(&entry, "\n");
474  }
475 }
476 
477 int64_t
479 {
480  const auto accumulated = entry.mem_obj->availableForSwapOut();
481 
482  /*
483  * Keep accumulating more bytes until the set of disks eligible to accept
484  * the entry becomes stable, and, hence, accumulating more is not going to
485  * affect the cache_dir selection. A stable set is usually reached
486  * immediately (or soon) because most configurations either do not use
487  * cache_dirs with explicit min-size/max-size limits or use the same
488  * max-size limit for all cache_dirs (and low min-size limits).
489  */
490 
491  // Can the set of min-size cache_dirs accepting this entry change?
492  if (accumulated < largestMinimumObjectSize)
493  return largestMinimumObjectSize - accumulated;
494 
495  // Can the set of max-size cache_dirs accepting this entry change
496  // (other than when the entry exceeds the largest maximum; see below)?
497  if (accumulated <= secondLargestMaximumObjectSize)
498  return secondLargestMaximumObjectSize - accumulated + 1;
499 
500  /*
501  * Checking largestMaximumObjectSize instead eliminates the risk of starting
502  * to swap out an entry that later grows too big, but also implies huge
503  * accumulation in most environments. Accumulating huge entries not only
504  * consumes lots of RAM but also creates a burst of doPages() write requests
505  * that overwhelm the disk. To avoid these problems, we take the risk and
506  * allow swap out now. The disk will quit swapping out if the entry
507  * eventually grows too big for its selected cache_dir.
508  */
509  debugs(20, 3, "no: " << accumulated << '>' <<
510  secondLargestMaximumObjectSize << ',' << largestMinimumObjectSize);
511  return 0;
512 }
513 
514 void
516 {
517  // accumulate per-disk cache stats
518  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
519  StoreInfoStats dirStats;
520  store(i)->getStats(dirStats);
521  stats += dirStats;
522  }
523 
524  // common to all disks
526 
527  // memory cache stats are collected in StoreController::getStats(), for now
528 }
529 
530 void
532 {
533  /* Now go through each store, calling its stat routine */
534 
535  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
536  storeAppendPrintf(&output, "\n");
537  store(i)->stat(output);
538  }
539 }
540 
541 void
543 {
544  e.disk().reference(e);
545 }
546 
547 bool
549 {
550  return e.disk().dereference(e);
551 }
552 
553 void
555 {
556  Must(e);
557  return e->disk().updateHeaders(e);
558 }
559 
560 void
562 {
563  /* walk each fs */
564 
565  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
566  /* XXX FixMe: This should be done "in parallel" on the different
567  * cache_dirs, not one at a time.
568  */
569  /* call the maintain function .. */
570  store(i)->maintain();
571  }
572 }
573 
574 void
576 {
577  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i)
578  store(i)->sync();
579 }
580 
581 void
583  if (e.hasDisk()) {
584  // TODO: move into Fs::Ufs::UFSSwapDir::evictCached()
585  if (!EBIT_TEST(e.flags, KEY_PRIVATE)) {
586  // log before evictCached() below may clear hasDisk()
588  }
589 
590  e.disk().evictCached(e);
591  return;
592  }
593 
594  if (const auto key = e.publicKey())
595  evictIfFound(key);
596 }
597 
598 void
600 {
601  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
602  if (Dir(i).active())
603  Dir(i).evictIfFound(key);
604  }
605 }
606 
607 bool
609 {
610  if (entry.hasDisk())
611  return true; // already anchored
612 
613  if (const size_t cacheDirs = Config.cacheSwap.n_configured) {
614  // ask each cache_dir until the entry is found; use static starting
615  // point to avoid asking the same subset of disks more often
616  // TODO: coordinate with put() to be able to guess the right disk often
617  static size_t idx = 0;
618  for (size_t n = 0; n < cacheDirs; ++n) {
619  idx = (idx + 1) % cacheDirs;
620  SwapDir &sd = Dir(idx);
621  if (!sd.active())
622  continue;
623 
624  if (sd.anchorToCache(entry)) {
625  debugs(20, 3, "cache_dir " << idx << " anchors " << entry);
626  return true;
627  }
628  }
629  }
630 
631  debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
632  " cache_dirs have " << entry);
633  return false;
634 }
635 
636 bool
638 {
639  return entry.hasDisk() &&
640  entry.disk().updateAnchored(entry);
641 }
642 
643 bool
645 {
646  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
647  // A mix is not supported, but we conservatively check every
648  // dir because features like collapsed revalidation should
649  // currently be disabled if any dir is SMP-aware
650  if (Dir(i).smpAware())
651  return true;
652  }
653  return false;
654 }
655 
656 SwapDir *
658 {
659  return storeDirSelectSwapDir(e);
660 }
661 
662 bool
664 {
665  for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i)
666  if (Dir(i).active() && Dir(i).hasReadableEntry(e))
667  return true;
668  return false;
669 }
670 
671 void
673 {
674  for (size_t dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
675  SwapDirByIndex(dirn).openLog();
676 }
677 
678 void
680 {
681  for (size_t dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
682  SwapDirByIndex(dirn).closeLog();
683 }
684 
694 int
696 {
697  const StoreEntry *e = nullptr;
698  int n = 0;
699 
700  struct timeval start;
701  double dt;
702  int notdone = 1;
703 
704  // Check for store_dirs_rebuilding because fatal() often calls us in early
705  // initialization phases, before store log is initialized and ready. Also,
706  // some stores do not support log cleanup during Store rebuilding.
708  debugs(20, Important(37), "Not currently OK to rewrite swap log.");
709  debugs(20, Important(38), "storeDirWriteCleanLogs: Operation aborted.");
710  return 0;
711  }
712 
713  debugs(20, Important(39), "storeDirWriteCleanLogs: Starting...");
714  getCurrentTime();
715  start = current_time;
716 
717  for (size_t dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
718  auto &sd = SwapDirByIndex(dirn);
719 
720  if (sd.writeCleanStart() < 0) {
721  debugs(20, DBG_IMPORTANT, "ERROR: log.clean.start() failed for dir #" << sd.index);
722  continue;
723  }
724  }
725 
726  /*
727  * This may look inefficient as CPU wise it is more efficient to do this
728  * sequentially, but I/O wise the parallelism helps as it allows more
729  * hdd spindles to be active.
730  */
731  while (notdone) {
732  notdone = 0;
733 
734  for (size_t dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
735  auto &sd = SwapDirByIndex(dirn);
736 
737  if (!sd.cleanLog)
738  continue;
739 
740  e = sd.cleanLog->nextEntry();
741 
742  if (!e)
743  continue;
744 
745  notdone = 1;
746 
747  if (!sd.canLog(*e))
748  continue;
749 
750  sd.cleanLog->write(*e);
751 
752  if ((++n & 0xFFFF) == 0) {
753  getCurrentTime();
754  debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
755  " entries written so far.");
756  }
757  }
758  }
759 
760  /* Flush */
761  for (size_t dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
763 
764  if (reopen)
766 
767  getCurrentTime();
768 
769  dt = tvSubDsec(start, current_time);
770 
771  debugs(20, Important(40), " Finished. Wrote " << n << " entries.");
772  debugs(20, Important(41), " Took "<< std::setw(3) << std::setprecision(2) << dt <<
773  " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");
774 
775  return n;
776 }
777 
778 /* Globals that should be converted to static Store::Disks methods */
779 
780 void
782 {
783  if (!swap.swapDirs) {
784  swap.n_allocated = 4;
785  swap.swapDirs = new SwapDir::Pointer[swap.n_allocated];
786  }
787 
788  if (swap.n_allocated == swap.n_configured) {
789  swap.n_allocated <<= 1;
790  const auto tmp = new SwapDir::Pointer[swap.n_allocated];
791  for (size_t i = 0; i < swap.n_configured; ++i) {
792  tmp[i] = swap.swapDirs[i];
793  }
794  delete[] swap.swapDirs;
795  swap.swapDirs = tmp;
796  }
797 }
798 
799 void
801 {
802  /* DON'T FREE THESE FOR RECONFIGURE */
803 
804  if (reconfiguring)
805  return;
806 
807  /* TODO XXX this lets the swapdir free resources asynchronously
808  * swap->swapDirs[i]->deactivate();
809  * but there may be such a means already.
810  * RBC 20041225
811  */
812 
813  // only free's the array memory itself
814  // the SwapDir objects may remain (ref-counted)
815  delete[] swap->swapDirs;
816  swap->swapDirs = nullptr;
817  swap->n_allocated = 0;
818  swap->n_configured = 0;
819 }
820 
821 /* Globals that should be moved to some Store::UFS-specific logging module */
822 
832 void
833 storeDirSwapLog(const StoreEntry * e, int op)
834 {
835  assert (e);
837  assert(e->hasDisk());
838  /*
839  * icons and such; don't write them to the swap log
840  */
841 
842  if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
843  return;
844 
845  assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);
846 
847  debugs(20, 3, "storeDirSwapLog: " <<
848  swap_log_op_str[op] << " " <<
849  e->getMD5Text() << " " <<
850  e->swap_dirn << " " <<
851  asHex(e->swap_filen).upperCase().minDigits(8));
852 
853  e->disk().logEntry(*e, op);
854 }
855 
int64_t accumulateMore(const StoreEntry &) const
Definition: Disks.cc:478
void fatal(const char *message)
Definition: fatal.cc:28
double tvSubDsec(struct timeval t1, struct timeval t2)
Definition: gadgets.cc:44
HASHCMP storeKeyHashCmp
uint64_t maxSize() const override
Definition: Disks.cc:321
#define Here()
source code location of the caller
Definition: Here.h:15
#define INDEXSD(i)
Definition: SquidConfig.h:74
#define DBG_CRITICAL
Definition: Stream.h:37
virtual bool active() const
Definition: Disk.cc:236
SwapDir * STDIRSELECT(const StoreEntry *e)
Definition: Disks.cc:28
void reference(StoreEntry &e) override
somebody needs this entry (many cache replacement policies need to know)
Definition: Disk.cc:136
virtual void logEntry(const StoreEntry &e, int op) const
Definition: Disk.cc:227
const cache_key * publicKey() const
Definition: Store.h:112
void getStats(StoreInfoStats &stats) const override
collect statistics
Definition: Disks.cc:515
manages a single cache_dir
Definition: Disk.h:21
unsigned char cache_key
Store key.
Definition: forward.h:29
MemObject * mem_obj
Definition: Store.h:220
static SwapDir & Dir(size_t index)
Definition: Disks.cc:181
size_t memMaxSize
Definition: SquidConfig.h:91
virtual bool updateAnchored(StoreEntry &)
Definition: Controlled.h:44
void storeAppendPrintf(StoreEntry *e, const char *fmt,...)
Definition: store.cc:855
@ KEY_PRIVATE
Definition: enums.h:97
void init() override
Definition: Disks.cc:258
void sync() override
prepare for shutdown
Definition: Disks.cc:575
static bool SmpAware()
whether any disk cache is SMP-aware
Definition: Disks.cc:644
void evictIfFound(const cache_key *) override
Definition: Disks.cc:599
int objectsPerBucket
Definition: SquidConfig.h:264
int64_t expectedReplySize() const
Definition: MemObject.cc:238
virtual void closeLog()
Definition: Disk.cc:215
double open_disk_fd
number of opened disk files
Definition: StoreStats.h:35
uint16_t flags
Definition: Store.h:231
SwapDir * store(size_t index) const
Definition: Disks.cc:175
Store::DiskConfig cacheSwap
Definition: SquidConfig.h:423
int64_t endOffset() const
Definition: MemObject.cc:214
int64_t availableForSwapOut() const
buffered bytes we have not swapped out yet
Definition: MemObject.cc:489
void create() override
create system resources needed for this store to operate in the future
Definition: Disks.cc:219
int store_open_disk_fd
@ SWAP_LOG_DEL
Definition: swap_log_op.h:15
@ SWAP_LOG_MAX
Definition: swap_log_op.h:17
void maintain() override
perform regular periodic maintenance; TODO: move to UFSSwapDir::Maintain
Definition: Disks.cc:561
void storeRebuildComplete(StoreRebuildData *dc)
void evictCached(StoreEntry &) override
Definition: Disks.cc:582
static SwapDir * SelectSwapDir(const StoreEntry *)
Definition: Disks.cc:657
virtual void openLog()
Definition: Disk.cc:212
void reference(StoreEntry &) override
somebody needs this entry (many cache replacement policies need to know)
Definition: Disks.cc:542
struct Store::Disk::Flags flags
char * store_dir_select_algorithm
Definition: SquidConfig.h:500
struct timeval current_time
the current UNIX time in timeval {seconds, microseconds} format
Definition: gadgets.cc:18
int callback() override
called once every main loop iteration; TODO: Move to UFS code.
Definition: Disks.cc:187
void updateHeaders(StoreEntry *) override
make stored metadata and HTTP headers the same as in the given entry
Definition: Disks.cc:554
bool dereference(StoreEntry &e) override
Definition: Disk.cc:139
#define DBG_PARSE_NOTE(x)
Definition: Stream.h:42
static struct tok * buckets[HASHSIZE]
Definition: parse.c:219
time_t getCurrentTime() STUB_RETVAL(0) int tvSubUsec(struct timeval
static STDIRSELECT storeDirSelectSwapDirRoundRobin
Definition: Disks.cc:30
virtual void updateHeaders(StoreEntry *)
make stored metadata and HTTP headers the same as in the given entry
Definition: Controlled.h:35
bool hasDisk(const sdirno dirn=-1, const sfileno filen=-1) const
Definition: store.cc:1929
sdirno swap_dirn
Definition: Store.h:237
Swap swap
cache_mem stats
Definition: StoreStats.h:47
#define INT_MAX
Definition: types.h:70
static StoreFileSystem * FindByType(const char *type)
#define EBIT_TEST(flag, bit)
Definition: defines.h:67
int n_strands
number of disk processes required to support all cache_dirs
Definition: SquidConfig.h:72
void free_cachedir(Store::DiskConfig *swap)
Definition: Disks.cc:800
AsHex< Integer > asHex(const Integer n)
a helper to ease AsHex object creation
Definition: IoManip.h:169
virtual bool anchorToCache(StoreEntry &)
Definition: Controlled.h:39
int reconfiguring
static void Parse(DiskConfig &)
parses a single cache_dir configuration line
Definition: Disks.cc:414
struct SquidConfig::@95 Store
@ SWAP_LOG_NOP
Definition: swap_log_op.h:13
static void Dump(const DiskConfig &, StoreEntry &, const char *name)
prints the configuration into the provided StoreEntry
Definition: Disks.cc:467
#define assert(EX)
Definition: assert.h:17
static int64_t objectSizeForDirSelection(const StoreEntry &entry)
Definition: Disks.cc:41
YesNoNone memShared
whether the memory cache is shared among workers
Definition: SquidConfig.h:89
void configure()
update configuration, including limits (re)calculation
Definition: Disks.cc:379
bool hasReadableEntry(const StoreEntry &) const
whether any of disk caches has entry with e.key
Definition: Disks.cc:663
uint64_t currentCount() const override
the total number of objects stored right now
Definition: Disks.cc:360
const char * swap_log_op_str[]
void write(StoreIOBuffer)
Definition: store.cc:780
hash_table * store_table
void allocate_new_swapdir(Store::DiskConfig &swap)
Definition: Disks.cc:781
static int store_dirs_rebuilding
the number of cache_dirs being rebuilt; TODO: move to Disks::Rebuilding
Definition: Controller.h:133
void storeDirCloseSwapLogs()
Definition: Disks.cc:679
static char * NextToken()
void storeDirSwapLog(const StoreEntry *e, int op)
Definition: Disks.cc:833
uint64_t minSize() const override
the minimum size the store will shrink to via normal housekeeping
Definition: Disks.cc:334
bool anchorToCache(StoreEntry &) override
Definition: Disks.cc:608
int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
Definition: Disks.cc:373
uint64_t maxSize() const override
Definition: Controller.cc:153
int storeDirWriteCleanLogs(int reopen)
Definition: Disks.cc:695
virtual void writeCleanDone()
Definition: Disk.cc:224
static STDIRSELECT storeDirSelectSwapDirLeastLoad
Definition: Disks.cc:31
an std::runtime_error with thrower location info
Definition: TextException.h:20
HASHHASH storeKeyHashHash
hash_table * hash_create(HASHCMP *, int, HASHHASH *)
Definition: hash.cc:108
SBuf ToSBuf(Args &&... args)
slowly stream-prints all arguments into a freshly allocated SBuf
Definition: Stream.h:63
#define Must(condition)
Definition: TextException.h:75
#define Important(id)
Definition: Messages.h:93
StoreEntry * get(const cache_key *) override
Definition: Disks.cc:232
@ ENTRY_SPECIAL
Definition: enums.h:79
#define DBG_IMPORTANT
Definition: Stream.h:38
const char * storeKeyText(const cache_key *key)
int64_t avgObjectSize
Definition: SquidConfig.h:265
Store::Disk & disk() const
the disk this entry is [being] cached on; asserts for entries w/o a disk
Definition: store.cc:1920
bool dereference(StoreEntry &e) override
Definition: Disks.cc:548
int store_hash_buckets
bool updateAnchored(StoreEntry &) override
Definition: Disks.cc:637
void storeDirOpenSwapLogs()
Definition: Disks.cc:672
uint64_t currentSize() const override
current size
Definition: Disks.cc:347
static STDIRSELECT * storeDirSelectSwapDir
Definition: Disks.cc:36
void stat(StoreEntry &) const override
Definition: Disks.cc:531
int storeKeyHashBuckets(int nbuckets)
#define debugs(SECTION, LEVEL, CONTENT)
Definition: Stream.h:192
size_t swap_hdr_sz
Definition: MemObject.h:216
const char * getMD5Text() const
Definition: store.cc:207
High-level store statistics used by mgr:info action. Used inside PODs!
Definition: StoreStats.h:13
class SquidConfig Config
Definition: SquidConfig.cc:12
virtual void evictCached(StoreEntry &e)=0
static SwapDir & SwapDirByIndex(const size_t i)
TODO: Remove when cache_dir-iterating functions are converted to Disks methods.
Definition: Disks.cc:58
sfileno swap_filen
unique ID inside a cache_dir for swapped out entries; -1 for others
Definition: Store.h:235
Controller & Root()
safely access controller singleton
Definition: Controller.cc:926
bool InDaemonMode()
Whether we are running in daemon mode.
Definition: tools.cc:690
RefCount< SwapDir > * swapDirs
Definition: SquidConfig.h:68

 

Introduction

Documentation

Support

Miscellaneous