Disks.cc
Go to the documentation of this file.
1 /*
2  * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 47 Store Directory Routines */
10 
11 #include "squid.h"
12 #include "cache_cf.h"
13 #include "ConfigParser.h"
14 #include "Debug.h"
15 #include "DebugMessages.h"
16 #include "globals.h"
17 #include "sbuf/Stream.h"
18 #include "SquidConfig.h"
19 #include "Store.h"
20 #include "store/Disk.h"
21 #include "store/Disks.h"
22 #include "store_rebuild.h"
23 #include "StoreFileSystem.h"
24 #include "swap_log_op.h"
25 #include "tools.h"
26 #include "util.h" // for tvSubDsec() which should be in SquidTime.h
27 
28 typedef SwapDir *STDIRSELECT(const StoreEntry *e);
29 
37 
40 static int64_t
42 {
43  // entry.objectLen() is negative here when we are still STORE_PENDING
44  int64_t minSize = entry.mem_obj->expectedReplySize();
45 
46  // If entry size is unknown, use already accumulated bytes as an estimate.
47  // Controller::accumulateMore() guarantees that there are enough of them.
48  if (minSize < 0)
49  minSize = entry.mem_obj->endOffset();
50 
51  assert(minSize >= 0);
52  minSize += entry.mem_obj->swap_hdr_sz;
53  return minSize;
54 }
55 
57 static SwapDir &
58 SwapDirByIndex(const int i)
59 {
60  assert(i >= 0);
62  const auto sd = INDEXSD(i);
63  assert(sd);
64  return *sd;
65 }
66 
72 static SwapDir *
74 {
75  const int64_t objsize = objectSizeForDirSelection(*e);
76 
77  // Increment the first candidate once per selection (not once per
78  // iteration) to reduce bias when some disk(s) attract more entries.
79  static int firstCandidate = 0;
80  if (++firstCandidate >= Config.cacheSwap.n_configured)
81  firstCandidate = 0;
82 
83  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
84  const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
85  auto &dir = SwapDirByIndex(dirn);
86 
87  int load = 0;
88  if (!dir.canStore(*e, objsize, load))
89  continue;
90 
91  if (load < 0 || load > 1000) {
92  continue;
93  }
94 
95  return &dir;
96  }
97 
98  return nullptr;
99 }
100 
114 static SwapDir *
116 {
117  int64_t most_free = 0;
118  int64_t best_objsize = -1;
119  int least_load = INT_MAX;
120  int load;
121  SwapDir *selectedDir = nullptr;
122  int i;
123 
124  const int64_t objsize = objectSizeForDirSelection(*e);
125 
126  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
127  auto &sd = SwapDirByIndex(i);
128  sd.flags.selected = false;
129 
130  if (!sd.canStore(*e, objsize, load))
131  continue;
132 
133  if (load < 0 || load > 1000)
134  continue;
135 
136  if (load > least_load)
137  continue;
138 
139  const int64_t cur_free = sd.maxSize() - sd.currentSize();
140 
141  /* If the load is equal, then look in more details */
142  if (load == least_load) {
143  /* best max-size fit */
144  if (best_objsize != -1) {
145  // cache_dir with the smallest max-size gets the known-size object
146  // cache_dir with the largest max-size gets the unknown-size object
147  if ((objsize != -1 && sd.maxObjectSize() > best_objsize) ||
148  (objsize == -1 && sd.maxObjectSize() < best_objsize))
149  continue;
150  }
151 
152  /* most free */
153  if (cur_free < most_free)
154  continue;
155  }
156 
157  least_load = load;
158  best_objsize = sd.maxObjectSize();
159  most_free = cur_free;
160  selectedDir = &sd;
161  }
162 
163  if (selectedDir)
164  selectedDir->flags.selected = true;
165 
166  return selectedDir;
167 }
168 
170  largestMinimumObjectSize(-1),
171  largestMaximumObjectSize(-1),
172  secondLargestMaximumObjectSize(-1)
173 {
174 }
175 
176 SwapDir *
177 Store::Disks::store(int const x) const
178 {
179  return &SwapDirByIndex(x);
180 }
181 
182 SwapDir &
183 Store::Disks::Dir(const int i)
184 {
185  return SwapDirByIndex(i);
186 }
187 
188 int
190 {
191  int result = 0;
192  int j;
193  static int ndir = 0;
194 
195  do {
196  j = 0;
197 
198  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
199  if (ndir >= Config.cacheSwap.n_configured)
200  ndir = ndir % Config.cacheSwap.n_configured;
201 
202  int temp_result = store(ndir)->callback();
203 
204  ++ndir;
205 
206  j += temp_result;
207 
208  result += temp_result;
209 
210  if (j > 100)
211  fatal ("too much io\n");
212  }
213  } while (j > 0);
214 
215  ++ndir;
216 
217  return result;
218 }
219 
220 void
222 {
223  if (Config.cacheSwap.n_configured == 0) {
224  debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
225  }
226 
227  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
228  if (Dir(i).active())
229  store(i)->create();
230  }
231 }
232 
233 StoreEntry *
235 {
236  if (const int cacheDirs = Config.cacheSwap.n_configured) {
237  // ask each cache_dir until the entry is found; use static starting
238  // point to avoid asking the same subset of disks more often
239  // TODO: coordinate with put() to be able to guess the right disk often
240  static int idx = 0;
241  for (int n = 0; n < cacheDirs; ++n) {
242  idx = (idx + 1) % cacheDirs;
243  auto &sd = Dir(idx);
244  if (!sd.active())
245  continue;
246 
247  if (auto e = sd.get(key)) {
248  debugs(20, 7, "cache_dir " << idx << " has: " << *e);
249  return e;
250  }
251  }
252  }
253 
254  debugs(20, 6, "none of " << Config.cacheSwap.n_configured <<
255  " cache_dirs have " << storeKeyText(key));
256  return nullptr;
257 }
258 
259 void
261 {
262  if (Config.Store.objectsPerBucket <= 0)
263  fatal("'store_objects_per_bucket' should be larger than 0.");
264 
265  if (Config.Store.avgObjectSize <= 0)
266  fatal("'store_avg_object_size' should be larger than 0.");
267 
268  /* Calculate size of hash table (maximum currently 64k buckets). */
269  /* this is very bogus; it is specific to any Store maintaining an
270  * in-core index, not global */
272  debugs(20, Important(31), "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
273  " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
275  debugs(20, Important(32), "Target number of buckets: " << buckets);
276  /* ideally the full scan period should be configurable, for the
277  * moment it remains at approximately 24 hours. */
279  debugs(20, Important(33), "Using " << store_hash_buckets << " Store buckets");
280  debugs(20, Important(34), "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
281  (Config.memShared ? " [shared]" : ""));
282  debugs(20, Important(35), "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");
283 
286 
287  // Increment _before_ any possible storeRebuildComplete() calls so that
288  // storeRebuildComplete() can reliably detect when all disks are done. The
289  // level is decremented in each corresponding storeRebuildComplete() call.
291 
292  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
293  /* this starts a search of the store dirs, loading their
294  * index. under the new Store api this should be
295  * driven by the StoreHashIndex, not by each store.
296  *
297  * That is, the HashIndex should perform a search of each dir it is
298  * indexing to do the hash insertions. The search is then able to
299  * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
300  * 'from-no-log'.
301  *
302  * Step 1: make the store rebuilds use a search internally
303  * Step 2: change the search logic to use the four modes described
304  * above
305  * Step 3: have the hash index walk the searches itself.
306  */
307  if (Dir(i).active())
308  store(i)->init();
309  else
310  storeRebuildComplete(nullptr);
311  }
312 
313  if (strcasecmp(Config.store_dir_select_algorithm, "round-robin") == 0) {
315  debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
316  } else {
318  debugs(47, Important(36), "Using Least Load store dir selection");
319  }
320 }
321 
322 uint64_t
324 {
325  uint64_t result = 0;
326 
327  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
328  if (Dir(i).doReportStat())
329  result += store(i)->maxSize();
330  }
331 
332  return result;
333 }
334 
335 uint64_t
337 {
338  uint64_t result = 0;
339 
340  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
341  if (Dir(i).doReportStat())
342  result += store(i)->minSize();
343  }
344 
345  return result;
346 }
347 
348 uint64_t
350 {
351  uint64_t result = 0;
352 
353  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
354  if (Dir(i).doReportStat())
355  result += store(i)->currentSize();
356  }
357 
358  return result;
359 }
360 
361 uint64_t
363 {
364  uint64_t result = 0;
365 
366  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
367  if (Dir(i).doReportStat())
368  result += store(i)->currentCount();
369  }
370 
371  return result;
372 }
373 
374 int64_t
376 {
377  return largestMaximumObjectSize;
378 }
379 
380 void
382 {
384  Controller::store_dirs_rebuilding = 0; // nothing to index
385 
386  largestMinimumObjectSize = -1;
387  largestMaximumObjectSize = -1;
388  secondLargestMaximumObjectSize = -1;
389 
391 
392  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
393  auto &disk = Dir(i);
394  if (disk.needsDiskStrand()) {
395  assert(InDaemonMode());
396  // XXX: Do not pretend to support disk.disker changes during reconfiguration
397  disk.disker = Config.workers + (++Config.cacheSwap.n_strands);
398  }
399 
400  if (!disk.active())
401  continue;
402 
403  if (disk.minObjectSize() > largestMinimumObjectSize)
404  largestMinimumObjectSize = disk.minObjectSize();
405 
406  const auto diskMaxObjectSize = disk.maxObjectSize();
407  if (diskMaxObjectSize > largestMaximumObjectSize) {
408  if (largestMaximumObjectSize >= 0) // was set
409  secondLargestMaximumObjectSize = largestMaximumObjectSize;
410  largestMaximumObjectSize = diskMaxObjectSize;
411  }
412  }
413 }
414 
415 void
417 {
418  const auto typeStr = ConfigParser::NextToken();
419  if (!typeStr)
420  throw TextException("missing cache_dir parameter: storage type", Here());
421 
422  const auto pathStr = ConfigParser::NextToken();
423  if (!pathStr)
424  throw TextException("missing cache_dir parameter: directory name", Here());
425 
426  const auto fs = StoreFileSystem::FindByType(typeStr);
427  if (!fs) {
428  debugs(3, DBG_PARSE_NOTE(DBG_IMPORTANT), "ERROR: This proxy does not support the '" << typeStr << "' cache type. Ignoring.");
429  return;
430  }
431 
432  const auto fsType = fs->type();
433 
434  // check for the existing cache_dir
435  // XXX: This code mistreats duplicated cache_dir entries (that should be fatal).
436  for (int i = 0; i < swap.n_configured; ++i) {
437  auto &disk = Dir(i);
438  if ((strcasecmp(pathStr, disk.path)) == 0) {
439  /* this is specific to on-fs Stores. The right
440  * way to handle this is probably to have a mapping
441  * from paths to stores, and have on-fs stores
442  * register with that, and lookip in that in their
443  * own setup logic. RBC 20041225. TODO.
444  */
445 
446  if (strcmp(disk.type(), fsType) == 0)
447  disk.reconfigure();
448  else
449  debugs(3, DBG_CRITICAL, "ERROR: Can't change type of existing cache_dir " <<
450  disk.type() << " " << disk.path << " to " << fsType << ". Restart required");
451 
452  return;
453  }
454  }
455 
456  const int cacheDirCountLimit = 64; // StoreEntry::swap_dirn is a signed 7-bit integer
457  if (swap.n_configured >= cacheDirCountLimit)
458  throw TextException(ToSBuf("Squid cannot handle more than ", cacheDirCountLimit, " cache_dir directives"), Here());
459 
460  // create a new cache_dir
461  allocate_new_swapdir(swap);
462  swap.swapDirs[swap.n_configured] = fs->createSwapDir();
463  auto &disk = Dir(swap.n_configured);
464  disk.parse(swap.n_configured, pathStr);
465  ++swap.n_configured;
466 }
467 
468 void
469 Store::Disks::Dump(const DiskConfig &swap, StoreEntry &entry, const char *name)
470 {
471  for (int i = 0; i < swap.n_configured; ++i) {
472  const auto &disk = Dir(i);
473  storeAppendPrintf(&entry, "%s %s %s", name, disk.type(), disk.path);
474  disk.dump(entry);
475  storeAppendPrintf(&entry, "\n");
476  }
477 }
478 
479 int64_t
481 {
482  const auto accumulated = entry.mem_obj->availableForSwapOut();
483 
484  /*
485  * Keep accumulating more bytes until the set of disks eligible to accept
486  * the entry becomes stable, and, hence, accumulating more is not going to
487  * affect the cache_dir selection. A stable set is usually reached
488  * immediately (or soon) because most configurations either do not use
489  * cache_dirs with explicit min-size/max-size limits or use the same
490  * max-size limit for all cache_dirs (and low min-size limits).
491  */
492 
493  // Can the set of min-size cache_dirs accepting this entry change?
494  if (accumulated < largestMinimumObjectSize)
495  return largestMinimumObjectSize - accumulated;
496 
497  // Can the set of max-size cache_dirs accepting this entry change
498  // (other than when the entry exceeds the largest maximum; see below)?
499  if (accumulated <= secondLargestMaximumObjectSize)
500  return secondLargestMaximumObjectSize - accumulated + 1;
501 
502  /*
503  * Checking largestMaximumObjectSize instead eliminates the risk of starting
504  * to swap out an entry that later grows too big, but also implies huge
505  * accumulation in most environments. Accumulating huge entries not only
506  * consumes lots of RAM but also creates a burst of doPages() write requests
507  * that overwhelm the disk. To avoid these problems, we take the risk and
508  * allow swap out now. The disk will quit swapping out if the entry
509  * eventually grows too big for its selected cache_dir.
510  */
511  debugs(20, 3, "no: " << accumulated << '>' <<
512  secondLargestMaximumObjectSize << ',' << largestMinimumObjectSize);
513  return 0;
514 }
515 
516 void
518 {
519  // accumulate per-disk cache stats
520  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
521  StoreInfoStats dirStats;
522  store(i)->getStats(dirStats);
523  stats += dirStats;
524  }
525 
526  // common to all disks
527  stats.swap.open_disk_fd = store_open_disk_fd;
528 
529  // memory cache stats are collected in StoreController::getStats(), for now
530 }
531 
532 void
534 {
535  int i;
536 
537  /* Now go through each store, calling its stat routine */
538 
539  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
540  storeAppendPrintf(&output, "\n");
541  store(i)->stat(output);
542  }
543 }
544 
545 void
547 {
548  e.disk().reference(e);
549 }
550 
551 bool
553 {
554  return e.disk().dereference(e);
555 }
556 
557 void
559 {
560  Must(e);
561  return e->disk().updateHeaders(e);
562 }
563 
564 void
566 {
567  int i;
568  /* walk each fs */
569 
570  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
571  /* XXX FixMe: This should be done "in parallel" on the different
572  * cache_dirs, not one at a time.
573  */
574  /* call the maintain function .. */
575  store(i)->maintain();
576  }
577 }
578 
579 void
581 {
582  for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
583  store(i)->sync();
584 }
585 
586 void
588  if (e.hasDisk()) {
589  // TODO: move into Fs::Ufs::UFSSwapDir::evictCached()
590  if (!EBIT_TEST(e.flags, KEY_PRIVATE)) {
591  // log before evictCached() below may clear hasDisk()
593  }
594 
595  e.disk().evictCached(e);
596  return;
597  }
598 
599  if (const auto key = e.publicKey())
600  evictIfFound(key);
601 }
602 
603 void
605 {
606  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
607  if (Dir(i).active())
608  Dir(i).evictIfFound(key);
609  }
610 }
611 
612 bool
614 {
615  if (const int cacheDirs = Config.cacheSwap.n_configured) {
616  // ask each cache_dir until the entry is found; use static starting
617  // point to avoid asking the same subset of disks more often
618  // TODO: coordinate with put() to be able to guess the right disk often
619  static int idx = 0;
620  for (int n = 0; n < cacheDirs; ++n) {
621  idx = (idx + 1) % cacheDirs;
622  SwapDir &sd = Dir(idx);
623  if (!sd.active())
624  continue;
625 
626  if (sd.anchorToCache(entry, inSync)) {
627  debugs(20, 3, "cache_dir " << idx << " anchors " << entry);
628  return true;
629  }
630  }
631  }
632 
633  debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
634  " cache_dirs have " << entry);
635  return false;
636 }
637 
638 bool
640 {
641  return entry.hasDisk() &&
642  entry.disk().updateAnchored(entry);
643 }
644 
645 bool
647 {
648  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
649  // A mix is not supported, but we conservatively check every
650  // dir because features like collapsed revalidation should
651  // currently be disabled if any dir is SMP-aware
652  if (Dir(i).smpAware())
653  return true;
654  }
655  return false;
656 }
657 
658 SwapDir *
660 {
661  return storeDirSelectSwapDir(e);
662 }
663 
664 bool
666 {
667  for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
668  if (Dir(i).active() && Dir(i).hasReadableEntry(e))
669  return true;
670  return false;
671 }
672 
673 void
675 {
676  for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
677  SwapDirByIndex(dirn).openLog();
678 }
679 
680 void
682 {
683  for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
684  SwapDirByIndex(dirn).closeLog();
685 }
686 
696 int
698 {
699  const StoreEntry *e = NULL;
700  int n = 0;
701 
702  struct timeval start;
703  double dt;
704  int dirn;
705  int notdone = 1;
706 
707  // Check for store_dirs_rebuilding because fatal() often calls us in early
708  // initialization phases, before store log is initialized and ready. Also,
709  // some stores do not support log cleanup during Store rebuilding.
711  debugs(20, Important(37), "Not currently OK to rewrite swap log.");
712  debugs(20, Important(38), "storeDirWriteCleanLogs: Operation aborted.");
713  return 0;
714  }
715 
716  debugs(20, Important(39), "storeDirWriteCleanLogs: Starting...");
717  getCurrentTime();
718  start = current_time;
719 
720  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
721  auto &sd = SwapDirByIndex(dirn);
722 
723  if (sd.writeCleanStart() < 0) {
724  debugs(20, DBG_IMPORTANT, "ERROR: log.clean.start() failed for dir #" << sd.index);
725  continue;
726  }
727  }
728 
729  /*
730  * This may look inefficient as CPU wise it is more efficient to do this
731  * sequentially, but I/O wise the parallelism helps as it allows more
732  * hdd spindles to be active.
733  */
734  while (notdone) {
735  notdone = 0;
736 
737  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
738  auto &sd = SwapDirByIndex(dirn);
739 
740  if (!sd.cleanLog)
741  continue;
742 
743  e = sd.cleanLog->nextEntry();
744 
745  if (!e)
746  continue;
747 
748  notdone = 1;
749 
750  if (!sd.canLog(*e))
751  continue;
752 
753  sd.cleanLog->write(*e);
754 
755  if ((++n & 0xFFFF) == 0) {
756  getCurrentTime();
757  debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
758  " entries written so far.");
759  }
760  }
761  }
762 
763  /* Flush */
764  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
766 
767  if (reopen)
769 
770  getCurrentTime();
771 
772  dt = tvSubDsec(start, current_time);
773 
774  debugs(20, Important(40), " Finished. Wrote " << n << " entries.");
775  debugs(20, Important(41), " Took "<< std::setw(3) << std::setprecision(2) << dt <<
776  " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");
777 
778  return n;
779 }
780 
781 /* Globals that should be converted to static Store::Disks methods */
782 
783 void
785 {
786  if (!swap.swapDirs) {
787  swap.n_allocated = 4;
788  swap.swapDirs = new SwapDir::Pointer[swap.n_allocated];
789  }
790 
791  if (swap.n_allocated == swap.n_configured) {
792  swap.n_allocated <<= 1;
793  const auto tmp = new SwapDir::Pointer[swap.n_allocated];
794  for (int i = 0; i < swap.n_configured; ++i) {
795  tmp[i] = swap.swapDirs[i];
796  }
797  delete[] swap.swapDirs;
798  swap.swapDirs = tmp;
799  }
800 }
801 
802 void
804 {
805  /* DON'T FREE THESE FOR RECONFIGURE */
806 
807  if (reconfiguring)
808  return;
809 
810  /* TODO XXX this lets the swapdir free resources asynchronously
811  * swap->swapDirs[i]->deactivate();
812  * but there may be such a means already.
813  * RBC 20041225
814  */
815 
816  // only free's the array memory itself
817  // the SwapDir objects may remain (ref-counted)
818  delete[] swap->swapDirs;
819  swap->swapDirs = nullptr;
820  swap->n_allocated = 0;
821  swap->n_configured = 0;
822 }
823 
824 /* Globals that should be moved to some Store::UFS-specific logging module */
825 
835 void
836 storeDirSwapLog(const StoreEntry * e, int op)
837 {
838  assert (e);
840  assert(e->hasDisk());
841  /*
842  * icons and such; don't write them to the swap log
843  */
844 
845  if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
846  return;
847 
848  assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);
849 
850  debugs(20, 3, "storeDirSwapLog: " <<
851  swap_log_op_str[op] << " " <<
852  e->getMD5Text() << " " <<
853  e->swap_dirn << " " <<
854  std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);
855 
856  e->disk().logEntry(*e, op);
857 }
858 
int64_t accumulateMore(const StoreEntry &) const
Definition: Disks.cc:480
void fatal(const char *message)
Definition: fatal.cc:28
HASHCMP storeKeyHashCmp
SwapDir * store(int const x) const
Definition: Disks.cc:177
class Ping::pingStats_ stats
virtual uint64_t maxSize() const override
Definition: Disks.cc:323
#define Here()
source code location of the caller
Definition: Here.h:15
#define INDEXSD(i)
Definition: SquidConfig.h:72
virtual bool active() const
Definition: Disk.cc:236
SwapDir * STDIRSELECT(const StoreEntry *e)
Definition: Disks.cc:28
virtual void reference(StoreEntry &e) override
somebody needs this entry (many cache replacement policies need to know)
Definition: Disk.cc:136
virtual void logEntry(const StoreEntry &e, int op) const
Definition: Disk.cc:227
const cache_key * publicKey() const
Definition: Store.h:112
virtual void getStats(StoreInfoStats &stats) const override
collect statistics
Definition: Disks.cc:517
manages a single cache_dir
Definition: Disk.h:22
unsigned char cache_key
Store key.
Definition: forward.h:29
MemObject * mem_obj
Definition: Store.h:222
SQUIDCEXTERN double tvSubDsec(struct timeval, struct timeval)
Definition: util.c:46
size_t memMaxSize
Definition: SquidConfig.h:89
@ KEY_PRIVATE
Definition: enums.h:102
virtual bool updateAnchored(StoreEntry &)
Definition: Controlled.h:45
void storeAppendPrintf(StoreEntry *e, const char *fmt,...)
Definition: store.cc:869
virtual void init() override
Definition: Disks.cc:260
virtual void sync() override
prepare for shutdown
Definition: Disks.cc:580
static bool SmpAware()
whether any disk cache is SMP-aware
Definition: Disks.cc:646
static SwapDir & Dir(int const idx)
Definition: Disks.cc:183
virtual void evictIfFound(const cache_key *) override
Definition: Disks.cc:604
int objectsPerBucket
Definition: SquidConfig.h:269
int64_t expectedReplySize() const
Definition: MemObject.cc:240
virtual void closeLog()
Definition: Disk.cc:215
uint16_t flags
Definition: Store.h:233
Store::DiskConfig cacheSwap
Definition: SquidConfig.h:430
int64_t endOffset() const
Definition: MemObject.cc:216
#define DBG_CRITICAL
Definition: Debug.h:40
int64_t availableForSwapOut() const
buffered bytes we have not swapped out yet
Definition: MemObject.cc:485
virtual void create() override
create system resources needed for this store to operate in the future
Definition: Disks.cc:221
time_t getCurrentTime(void)
Get current time.
int store_open_disk_fd
#define DBG_IMPORTANT
Definition: Debug.h:41
@ SWAP_LOG_DEL
Definition: swap_log_op.h:15
@ SWAP_LOG_MAX
Definition: swap_log_op.h:17
virtual void maintain() override
perform regular periodic maintenance; TODO: move to UFSSwapDir::Maintain
Definition: Disks.cc:565
void storeRebuildComplete(StoreRebuildData *dc)
virtual void evictCached(StoreEntry &) override
Definition: Disks.cc:587
static SwapDir * SelectSwapDir(const StoreEntry *)
Definition: Disks.cc:659
virtual void openLog()
Definition: Disk.cc:212
virtual void reference(StoreEntry &) override
somebody needs this entry (many cache replacement policies need to know)
Definition: Disks.cc:546
struct Store::Disk::Flags flags
char * store_dir_select_algorithm
Definition: SquidConfig.h:517
#define NULL
Definition: types.h:166
virtual int callback() override
called once every main loop iteration; TODO: Move to UFS code.
Definition: Disks.cc:189
virtual void updateHeaders(StoreEntry *) override
make stored metadata and HTTP headers the same as in the given entry
Definition: Disks.cc:558
#define debugs(SECTION, LEVEL, CONTENT)
Definition: Debug.h:123
virtual bool dereference(StoreEntry &e) override
Definition: Disk.cc:139
static struct tok * buckets[HASHSIZE]
Definition: parse.c:219
virtual bool anchorToCache(StoreEntry &e, bool &inSync) override
Definition: Disks.cc:613
static STDIRSELECT storeDirSelectSwapDirRoundRobin
Definition: Disks.cc:30
virtual void updateHeaders(StoreEntry *)
make stored metadata and HTTP headers the same as in the given entry
Definition: Controlled.h:35
bool hasDisk(const sdirno dirn=-1, const sfileno filen=-1) const
Definition: store.cc:1930
sdirno swap_dirn
Definition: Store.h:239
#define INT_MAX
Definition: types.h:76
static StoreFileSystem * FindByType(const char *type)
#define EBIT_TEST(flag, bit)
Definition: defines.h:69
int n_strands
number of disk processes required to support all cache_dirs
Definition: SquidConfig.h:70
void free_cachedir(Store::DiskConfig *swap)
Definition: Disks.cc:803
int reconfiguring
static void Parse(DiskConfig &)
parses a single cache_dir configuration line
Definition: Disks.cc:416
@ SWAP_LOG_NOP
Definition: swap_log_op.h:13
static void Dump(const DiskConfig &, StoreEntry &, const char *name)
prints the configuration into the provided StoreEntry
Definition: Disks.cc:469
#define assert(EX)
Definition: assert.h:19
static int64_t objectSizeForDirSelection(const StoreEntry &entry)
Definition: Disks.cc:41
YesNoNone memShared
whether the memory cache is shared among workers
Definition: SquidConfig.h:87
void configure()
update configuration, including limits (re)calculation
Definition: Disks.cc:381
bool hasReadableEntry(const StoreEntry &) const
whether any of disk caches has entry with e.key
Definition: Disks.cc:665
virtual uint64_t currentCount() const override
the total number of objects stored right now
Definition: Disks.cc:362
const char * swap_log_op_str[]
@ ENTRY_SPECIAL
Definition: enums.h:84
void write(StoreIOBuffer)
Definition: store.cc:794
hash_table * store_table
void allocate_new_swapdir(Store::DiskConfig &swap)
Definition: Disks.cc:784
#define DBG_PARSE_NOTE(x)
Definition: Debug.h:45
static int store_dirs_rebuilding
the number of cache_dirs being rebuilt; TODO: move to Disks::Rebuilding
Definition: Controller.h:139
void storeDirCloseSwapLogs()
Definition: Disks.cc:681
static char * NextToken()
void storeDirSwapLog(const StoreEntry *e, int op)
Definition: Disks.cc:836
virtual uint64_t minSize() const override
the minimum size the store will shrink to via normal housekeeping
Definition: Disks.cc:336
virtual int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
Definition: Disks.cc:375
virtual uint64_t maxSize() const override
Definition: Controller.cc:159
int storeDirWriteCleanLogs(int reopen)
Definition: Disks.cc:697
#define Important(id)
Definition: DebugMessages.h:91
virtual void writeCleanDone()
Definition: Disk.cc:224
struct timeval current_time
Definition: stub_time.cc:15
static STDIRSELECT storeDirSelectSwapDirLeastLoad
Definition: Disks.cc:31
struct SquidConfig::@109 Store
an std::runtime_error with thrower location info
Definition: TextException.h:20
HASHHASH storeKeyHashHash
SBuf ToSBuf(Args &&... args)
slowly stream-prints all arguments into a freshly allocated SBuf
Definition: Stream.h:63
#define Must(condition)
Like assert() but throws an exception instead of aborting the process.
Definition: TextException.h:73
virtual StoreEntry * get(const cache_key *) override
Definition: Disks.cc:234
const char * storeKeyText(const cache_key *key)
int64_t avgObjectSize
Definition: SquidConfig.h:270
SQUIDCEXTERN hash_table * hash_create(HASHCMP *, int, HASHHASH *)
Definition: hash.cc:108
Store::Disk & disk() const
the disk this entry is [being] cached on; asserts for entries w/o a disk
Definition: store.cc:1921
virtual bool dereference(StoreEntry &e) override
Definition: Disks.cc:552
int store_hash_buckets
virtual bool updateAnchored(StoreEntry &) override
Definition: Disks.cc:639
void storeDirOpenSwapLogs()
Definition: Disks.cc:674
virtual uint64_t currentSize() const override
current size
Definition: Disks.cc:349
static STDIRSELECT * storeDirSelectSwapDir
Definition: Disks.cc:36
virtual void stat(StoreEntry &) const override
Definition: Disks.cc:533
int storeKeyHashBuckets(int nbuckets)
size_t swap_hdr_sz
Definition: MemObject.h:199
const char * getMD5Text() const
Definition: store.cc:205
High-level store statistics used by mgr:info action. Used inside PODs!
Definition: StoreStats.h:14
virtual bool anchorToCache(StoreEntry &, bool &)
Definition: Controlled.h:40
class SquidConfig Config
Definition: SquidConfig.cc:12
virtual void evictCached(StoreEntry &e)=0
static SwapDir & SwapDirByIndex(const int i)
TODO: Remove when cache_dir-iterating functions are converted to Disks methods.
Definition: Disks.cc:58
sfileno swap_filen
unique ID inside a cache_dir for swapped out entries; -1 for others
Definition: Store.h:237
Controller & Root()
safely access controller singleton
Definition: Controller.cc:934
bool InDaemonMode()
Whether we are running in daemon mode.
Definition: tools.cc:657
RefCount< SwapDir > * swapDirs
Definition: SquidConfig.h:66

 

Introduction

Documentation

Support

Miscellaneous

Web Site Translations

Mirrors