Disks.cc
Go to the documentation of this file.
1 /*
2  * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 47 Store Directory Routines */
10 
11 #include "squid.h"
12 #include "Debug.h"
13 #include "globals.h"
14 #include "profiler/Profiler.h"
15 #include "SquidConfig.h"
16 #include "Store.h"
17 #include "store/Disk.h"
18 #include "store/Disks.h"
19 #include "store_rebuild.h"
20 #include "swap_log_op.h"
21 #include "util.h" // for tvSubDsec() which should be in SquidTime.h
22 
30 
33 static int64_t
35 {
36  // entry.objectLen() is negative here when we are still STORE_PENDING
37  int64_t minSize = entry.mem_obj->expectedReplySize();
38 
39  // If entry size is unknown, use already accumulated bytes as an estimate.
40  // Controller::accumulateMore() guarantees that there are enough of them.
41  if (minSize < 0)
42  minSize = entry.mem_obj->endOffset();
43 
44  assert(minSize >= 0);
45  minSize += entry.mem_obj->swap_hdr_sz;
46  return minSize;
47 }
48 
54 static int
56 {
57  const int64_t objsize = objectSizeForDirSelection(*e);
58 
59  // Increment the first candidate once per selection (not once per
60  // iteration) to reduce bias when some disk(s) attract more entries.
61  static int firstCandidate = 0;
62  if (++firstCandidate >= Config.cacheSwap.n_configured)
63  firstCandidate = 0;
64 
65  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
66  const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
67  const SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(dirn));
68 
69  int load = 0;
70  if (!sd->canStore(*e, objsize, load))
71  continue;
72 
73  if (load < 0 || load > 1000) {
74  continue;
75  }
76 
77  return dirn;
78  }
79 
80  return -1;
81 }
82 
96 static int
98 {
99  int64_t most_free = 0;
100  int64_t best_objsize = -1;
101  int least_load = INT_MAX;
102  int load;
103  int dirn = -1;
104  int i;
106 
107  const int64_t objsize = objectSizeForDirSelection(*e);
108 
109  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
110  SD = dynamic_cast<SwapDir *>(INDEXSD(i));
111  SD->flags.selected = false;
112 
113  if (!SD->canStore(*e, objsize, load))
114  continue;
115 
116  if (load < 0 || load > 1000)
117  continue;
118 
119  if (load > least_load)
120  continue;
121 
122  const int64_t cur_free = SD->maxSize() - SD->currentSize();
123 
124  /* If the load is equal, then look in more details */
125  if (load == least_load) {
126  /* best max-size fit */
127  if (best_objsize != -1) {
128  // cache_dir with the smallest max-size gets the known-size object
129  // cache_dir with the largest max-size gets the unknown-size object
130  if ((objsize != -1 && SD->maxObjectSize() > best_objsize) ||
131  (objsize == -1 && SD->maxObjectSize() < best_objsize))
132  continue;
133  }
134 
135  /* most free */
136  if (cur_free < most_free)
137  continue;
138  }
139 
140  least_load = load;
141  best_objsize = SD->maxObjectSize();
142  most_free = cur_free;
143  dirn = i;
144  }
145 
146  if (dirn >= 0)
147  dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = true;
148 
149  return dirn;
150 }
151 
153  largestMinimumObjectSize(-1),
154  largestMaximumObjectSize(-1),
155  secondLargestMaximumObjectSize(-1)
156 {
157 }
158 
/// Returns the cache_dir (SwapDir) object at swap-dir index x, looked up via
/// the INDEXSD macro. Performs no bounds or nullness checking — callers are
/// expected to pass a valid configured index (see Dir() for the checked,
/// reference-returning variant).
159 SwapDir *
160 Store::Disks::store(int const x) const
161 {
162  return INDEXSD(x);
163 }
164 
/// Checked accessor for a configured cache_dir: asserts that the SwapDir at
/// index i exists and returns it by reference (unlike store(), which returns
/// the raw pointer unchecked).
165 SwapDir &
166 Store::Disks::Dir(const int i)
167 {
168  SwapDir *sd = INDEXSD(i);
169  assert(sd);
170  return *sd;
171 }
172 
173 int
175 {
176  int result = 0;
177  int j;
178  static int ndir = 0;
179 
180  do {
181  j = 0;
182 
183  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
184  if (ndir >= Config.cacheSwap.n_configured)
185  ndir = ndir % Config.cacheSwap.n_configured;
186 
187  int temp_result = store(ndir)->callback();
188 
189  ++ndir;
190 
191  j += temp_result;
192 
193  result += temp_result;
194 
195  if (j > 100)
196  fatal ("too much io\n");
197  }
198  } while (j > 0);
199 
200  ++ndir;
201 
202  return result;
203 }
204 
205 void
207 {
208  if (Config.cacheSwap.n_configured == 0) {
209  debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
210  }
211 
212  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
213  if (Dir(i).active())
214  store(i)->create();
215  }
216 }
217 
218 StoreEntry *
220 {
221  if (const int cacheDirs = Config.cacheSwap.n_configured) {
222  // ask each cache_dir until the entry is found; use static starting
223  // point to avoid asking the same subset of disks more often
224  // TODO: coordinate with put() to be able to guess the right disk often
225  static int idx = 0;
226  for (int n = 0; n < cacheDirs; ++n) {
227  idx = (idx + 1) % cacheDirs;
228  SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
229  if (!sd->active())
230  continue;
231 
232  if (StoreEntry *e = sd->get(key)) {
233  debugs(20, 7, "cache_dir " << idx << " has: " << *e);
234  return e;
235  }
236  }
237  }
238 
239  debugs(20, 6, "none of " << Config.cacheSwap.n_configured <<
240  " cache_dirs have " << storeKeyText(key));
241  return nullptr;
242 }
243 
244 void
246 {
247  if (Config.Store.objectsPerBucket <= 0)
248  fatal("'store_objects_per_bucket' should be larger than 0.");
249 
250  if (Config.Store.avgObjectSize <= 0)
251  fatal("'store_avg_object_size' should be larger than 0.");
252 
253  /* Calculate size of hash table (maximum currently 64k buckets). */
254 /* this is very bogus; it's specific to any Store maintaining an
255  * in-core index, not global */
257  debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
258  " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
260  debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
261  /* ideally the full scan period should be configurable, for the
262  * moment it remains at approximately 24 hours. */
264  debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
265  debugs(20, DBG_IMPORTANT, "Max Mem size: " << ( Config.memMaxSize >> 10) << " KB" <<
266  (Config.memShared ? " [shared]" : ""));
267  debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");
268 
271 
272  // Increment _before_ any possible storeRebuildComplete() calls so that
273  // storeRebuildComplete() can reliably detect when all disks are done. The
274  // level is decremented in each corresponding storeRebuildComplete() call.
276 
277  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
278  /* this starts a search of the store dirs, loading their
279  * index. under the new Store api this should be
280  * driven by the StoreHashIndex, not by each store.
281  *
282  * That is, the HashIndex should perform a search of each dir it is
283  * indexing to do the hash insertions. The search is then able to
284  * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
285  * 'from-no-log'.
286  *
287  * Step 1: make the store rebuilds use a search internally
288  * Step 2: change the search logic to use the four modes described
289  * above
290  * Step 3: have the hash index walk the searches itself.
291  */
292  if (Dir(i).active())
293  store(i)->init();
294  else
295  storeRebuildComplete(nullptr);
296  }
297 
298  if (strcasecmp(Config.store_dir_select_algorithm, "round-robin") == 0) {
300  debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
301  } else {
303  debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
304  }
305 }
306 
307 uint64_t
309 {
310  uint64_t result = 0;
311 
312  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
313  if (Dir(i).doReportStat())
314  result += store(i)->maxSize();
315  }
316 
317  return result;
318 }
319 
320 uint64_t
322 {
323  uint64_t result = 0;
324 
325  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
326  if (Dir(i).doReportStat())
327  result += store(i)->minSize();
328  }
329 
330  return result;
331 }
332 
333 uint64_t
335 {
336  uint64_t result = 0;
337 
338  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
339  if (Dir(i).doReportStat())
340  result += store(i)->currentSize();
341  }
342 
343  return result;
344 }
345 
346 uint64_t
348 {
349  uint64_t result = 0;
350 
351  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
352  if (Dir(i).doReportStat())
353  result += store(i)->currentCount();
354  }
355 
356  return result;
357 }
358 
359 int64_t
361 {
362  return largestMaximumObjectSize;
363 }
364 
365 void
367 {
368  largestMinimumObjectSize = -1;
369  largestMaximumObjectSize = -1;
370  secondLargestMaximumObjectSize = -1;
371 
372  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
373  const auto &disk = Dir(i);
374  if (!disk.active())
375  continue;
376 
377  if (disk.minObjectSize() > largestMinimumObjectSize)
378  largestMinimumObjectSize = disk.minObjectSize();
379 
380  const auto diskMaxObjectSize = disk.maxObjectSize();
381  if (diskMaxObjectSize > largestMaximumObjectSize) {
382  if (largestMaximumObjectSize >= 0) // was set
383  secondLargestMaximumObjectSize = largestMaximumObjectSize;
384  largestMaximumObjectSize = diskMaxObjectSize;
385  }
386  }
387 }
388 
389 int64_t
391 {
392  const auto accumulated = entry.mem_obj->availableForSwapOut();
393 
394  /*
395  * Keep accumulating more bytes until the set of disks eligible to accept
396  * the entry becomes stable, and, hence, accumulating more is not going to
397  * affect the cache_dir selection. A stable set is usually reached
398  * immediately (or soon) because most configurations either do not use
399  * cache_dirs with explicit min-size/max-size limits or use the same
400  * max-size limit for all cache_dirs (and low min-size limits).
401  */
402 
403  // Can the set of min-size cache_dirs accepting this entry change?
404  if (accumulated < largestMinimumObjectSize)
405  return largestMinimumObjectSize - accumulated;
406 
407  // Can the set of max-size cache_dirs accepting this entry change
408  // (other than when the entry exceeds the largest maximum; see below)?
409  if (accumulated <= secondLargestMaximumObjectSize)
410  return secondLargestMaximumObjectSize - accumulated + 1;
411 
412  /*
413  * Checking largestMaximumObjectSize instead eliminates the risk of starting
414  * to swap out an entry that later grows too big, but also implies huge
415  * accumulation in most environments. Accumulating huge entries not only
416  * consumes lots of RAM but also creates a burst of doPages() write requests
417  * that overwhelm the disk. To avoid these problems, we take the risk and
418  * allow swap out now. The disk will quit swapping out if the entry
419  * eventually grows too big for its selected cache_dir.
420  */
421  debugs(20, 3, "no: " << accumulated << '>' <<
422  secondLargestMaximumObjectSize << ',' << largestMinimumObjectSize);
423  return 0;
424 }
425 
426 void
428 {
429  // accumulate per-disk cache stats
430  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
431  StoreInfoStats dirStats;
432  store(i)->getStats(dirStats);
433  stats += dirStats;
434  }
435 
436  // common to all disks
437  stats.swap.open_disk_fd = store_open_disk_fd;
438 
439  // memory cache stats are collected in StoreController::getStats(), for now
440 }
441 
442 void
444 {
445  int i;
446 
447  /* Now go through each store, calling its stat routine */
448 
449  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
450  storeAppendPrintf(&output, "\n");
451  store(i)->stat(output);
452  }
453 }
454 
455 void
457 {
458  e.disk().reference(e);
459 }
460 
461 bool
463 {
464  return e.disk().dereference(e);
465 }
466 
467 void
469 {
470  Must(e);
471  return e->disk().updateHeaders(e);
472 }
473 
474 void
476 {
477  int i;
478  /* walk each fs */
479 
480  for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
481  /* XXX FixMe: This should be done "in parallel" on the different
482  * cache_dirs, not one at a time.
483  */
484  /* call the maintain function .. */
485  store(i)->maintain();
486  }
487 }
488 
489 void
491 {
492  for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
493  store(i)->sync();
494 }
495 
496 void
498  if (e.hasDisk()) {
499  // TODO: move into Fs::Ufs::UFSSwapDir::evictCached()
500  if (!EBIT_TEST(e.flags, KEY_PRIVATE)) {
501  // log before evictCached() below may clear hasDisk()
503  }
504 
505  e.disk().evictCached(e);
506  return;
507  }
508 
509  if (const auto key = e.publicKey())
510  evictIfFound(key);
511 }
512 
513 void
515 {
516  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
517  if (Dir(i).active())
518  Dir(i).evictIfFound(key);
519  }
520 }
521 
522 bool
524 {
525  if (const int cacheDirs = Config.cacheSwap.n_configured) {
526  // ask each cache_dir until the entry is found; use static starting
527  // point to avoid asking the same subset of disks more often
528  // TODO: coordinate with put() to be able to guess the right disk often
529  static int idx = 0;
530  for (int n = 0; n < cacheDirs; ++n) {
531  idx = (idx + 1) % cacheDirs;
532  SwapDir &sd = Dir(idx);
533  if (!sd.active())
534  continue;
535 
536  if (sd.anchorToCache(entry, inSync)) {
537  debugs(20, 3, "cache_dir " << idx << " anchors " << entry);
538  return true;
539  }
540  }
541  }
542 
543  debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
544  " cache_dirs have " << entry);
545  return false;
546 }
547 
548 bool
550 {
551  return entry.hasDisk() &&
552  Dir(entry.swap_dirn).updateAnchored(entry);
553 }
554 
555 bool
557 {
558  for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
559  // A mix is not supported, but we conservatively check every
560  // dir because features like collapsed revalidation should
561  // currently be disabled if any dir is SMP-aware
562  if (Dir(i).smpAware())
563  return true;
564  }
565  return false;
566 }
567 
568 bool
570 {
571  for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
572  if (Dir(i).active() && Dir(i).hasReadableEntry(e))
573  return true;
574  return false;
575 }
576 
577 void
579 {
580  for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
581  INDEXSD(dirn)->openLog();
582 }
583 
584 void
586 {
587  for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
588  INDEXSD(dirn)->closeLog();
589 }
590 
600 int
602 {
603  const StoreEntry *e = NULL;
604  int n = 0;
605 
606  struct timeval start;
607  double dt;
609  int dirn;
610  int notdone = 1;
611 
612  // Check for store_dirs_rebuilding because fatal() often calls us in early
613  // initialization phases, before store log is initialized and ready. Also,
614  // some stores do not support log cleanup during Store rebuilding.
616  debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
617  debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
618  return 0;
619  }
620 
621  debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
622  getCurrentTime();
623  start = current_time;
624 
625  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
626  sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
627 
628  if (sd->writeCleanStart() < 0) {
629  debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
630  continue;
631  }
632  }
633 
634  /*
635  * This may look inefficient as CPU wise it is more efficient to do this
636  * sequentially, but I/O-wise the parallelism helps as it allows more
637  * hdd spindles to be active.
638  */
639  while (notdone) {
640  notdone = 0;
641 
642  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
643  sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));
644 
645  if (NULL == sd->cleanLog)
646  continue;
647 
648  e = sd->cleanLog->nextEntry();
649 
650  if (!e)
651  continue;
652 
653  notdone = 1;
654 
655  if (!sd->canLog(*e))
656  continue;
657 
658  sd->cleanLog->write(*e);
659 
660  if ((++n & 0xFFFF) == 0) {
661  getCurrentTime();
662  debugs(20, DBG_IMPORTANT, " " << std::setw(7) << n <<
663  " entries written so far.");
664  }
665  }
666  }
667 
668  /* Flush */
669  for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
670  dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();
671 
672  if (reopen)
674 
675  getCurrentTime();
676 
677  dt = tvSubDsec(start, current_time);
678 
679  debugs(20, DBG_IMPORTANT, " Finished. Wrote " << n << " entries.");
680  debugs(20, DBG_IMPORTANT, " Took "<< std::setw(3)<< std::setprecision(2) << dt <<
681  " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");
682 
683  return n;
684 }
685 
686 /* Globals that should be converted to static Store::Disks methods */
687 
688 void
690 {
691  if (!swap->swapDirs) {
692  swap->n_allocated = 4;
693  swap->swapDirs = new SwapDir::Pointer[swap->n_allocated];
694  }
695 
696  if (swap->n_allocated == swap->n_configured) {
697  swap->n_allocated <<= 1;
698  const auto tmp = new SwapDir::Pointer[swap->n_allocated];
699  for (int i = 0; i < swap->n_configured; ++i) {
700  tmp[i] = swap->swapDirs[i];
701  }
702  delete[] swap->swapDirs;
703  swap->swapDirs = tmp;
704  }
705 }
706 
707 void
709 {
710  /* DON'T FREE THESE FOR RECONFIGURE */
711 
712  if (reconfiguring)
713  return;
714 
715  /* TODO XXX this lets the swapdir free resources asynchronously
716  * swap->swapDirs[i]->deactivate();
717  * but there may be such a means already.
718  * RBC 20041225
719  */
720 
721  // only frees the array memory itself
722  // the SwapDir objects may remain (ref-counted)
723  delete[] swap->swapDirs;
724  swap->swapDirs = nullptr;
725  swap->n_allocated = 0;
726  swap->n_configured = 0;
727 }
728 
729 /* Globals that should be moved to some Store::UFS-specific logging module */
730 
740 void
741 storeDirSwapLog(const StoreEntry * e, int op)
742 {
743  assert (e);
745  assert(e->hasDisk());
746  /*
747  * icons and such; don't write them to the swap log
748  */
749 
750  if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
751  return;
752 
753  assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);
754 
755  debugs(20, 3, "storeDirSwapLog: " <<
756  swap_log_op_str[op] << " " <<
757  e->getMD5Text() << " " <<
758  e->swap_dirn << " " <<
759  std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);
760 
761  dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
762 }
763 
int64_t accumulateMore(const StoreEntry &) const
Definition: Disks.cc:390
void fatal(const char *message)
Definition: fatal.cc:28
HASHCMP storeKeyHashCmp
SwapDir * store(int const x) const
Definition: Disks.cc:160
class Ping::pingStats_ stats
virtual uint64_t maxSize() const override
Definition: Disks.cc:308
#define INDEXSD(i)
Definition: SquidConfig.h:71
virtual bool active() const
Definition: Disk.cc:236
virtual bool canStore(const StoreEntry &e, int64_t diskSpaceNeeded, int &load) const =0
check whether we can store the entry; if we can, report current load
Definition: Disk.cc:164
virtual void reference(StoreEntry &e) override
somebody needs this entry (many cache replacement policies need to know)
Definition: Disk.cc:136
const cache_key * publicKey() const
Definition: Store.h:103
virtual void getStats(StoreInfoStats &stats) const override
collect statistics
Definition: Disks.cc:427
manages a single cache_dir
Definition: Disk.h:21
unsigned char cache_key
Store key.
Definition: forward.h:29
MemObject * mem_obj
Definition: Store.h:213
SQUIDCEXTERN double tvSubDsec(struct timeval, struct timeval)
Definition: util.c:46
size_t memMaxSize
Definition: SquidConfig.h:88
void storeAppendPrintf(StoreEntry *e, const char *fmt,...)
Definition: store.cc:901
virtual void init() override
Definition: Disks.cc:245
virtual StoreEntry * get(const cache_key *) override
Definition: Disk.cc:393
virtual void sync() override
prepare for shutdown
Definition: Disks.cc:490
static bool SmpAware()
whether any disk cache is SMP-aware
Definition: Disks.cc:556
@ KEY_PRIVATE
Definition: enums.h:102
static SwapDir & Dir(int const idx)
Definition: Disks.cc:166
virtual void evictIfFound(const cache_key *) override
Definition: Disks.cc:514
int objectsPerBucket
Definition: SquidConfig.h:267
int64_t expectedReplySize() const
Definition: MemObject.cc:259
virtual int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
Definition: Disk.cc:103
uint16_t flags
Definition: Store.h:224
Store::DiskConfig cacheSwap
Definition: SquidConfig.h:427
int64_t endOffset() const
Definition: MemObject.cc:235
#define DBG_CRITICAL
Definition: Debug.h:45
int64_t availableForSwapOut() const
buffered bytes we have not swapped out yet
Definition: MemObject.cc:514
virtual void create() override
create system resources needed for this store to operate in the future
Definition: Disks.cc:206
time_t getCurrentTime(void)
Get current time.
int store_open_disk_fd
#define DBG_IMPORTANT
Definition: Debug.h:46
@ SWAP_LOG_DEL
Definition: swap_log_op.h:15
@ SWAP_LOG_MAX
Definition: swap_log_op.h:17
virtual void maintain() override
perform regular periodic maintenance; TODO: move to UFSSwapDir::Maintain
Definition: Disks.cc:475
void storeRebuildComplete(StoreRebuildData *dc)
virtual void evictCached(StoreEntry &) override
Definition: Disks.cc:497
@ ENTRY_SPECIAL
Definition: enums.h:84
virtual void reference(StoreEntry &) override
somebody needs this entry (many cache replacement policies need to know)
Definition: Disks.cc:456
struct Store::Disk::Flags flags
char * store_dir_select_algorithm
Definition: SquidConfig.h:514
#define NULL
Definition: types.h:166
virtual int callback() override
called once every main loop iteration; TODO: Move to UFS code.
Definition: Disks.cc:174
virtual void updateHeaders(StoreEntry *) override
make stored metadata and HTTP headers the same as in the given entry
Definition: Disks.cc:468
virtual uint64_t maxSize() const override
Definition: Disk.h:48
#define debugs(SECTION, LEVEL, CONTENT)
Definition: Debug.h:128
virtual bool dereference(StoreEntry &e) override
Definition: Disk.cc:139
static struct tok * buckets[HASHSIZE]
Definition: parse.c:219
virtual bool anchorToCache(StoreEntry &e, bool &inSync) override
Definition: Disks.cc:523
static STDIRSELECT storeDirSelectSwapDirRoundRobin
Definition: Disks.cc:23
virtual void updateHeaders(StoreEntry *)
make stored metadata and HTTP headers the same as in the given entry
Definition: Controlled.h:35
bool hasDisk(const sdirno dirn=-1, const sfileno filen=-1) const
Definition: store.cc:1990
sdirno swap_dirn
Definition: Store.h:230
#define INT_MAX
Definition: types.h:76
#define EBIT_TEST(flag, bit)
Definition: defines.h:107
void free_cachedir(Store::DiskConfig *swap)
Definition: Disks.cc:708
int reconfiguring
@ SWAP_LOG_NOP
Definition: swap_log_op.h:13
int STDIRSELECT(const StoreEntry *e)
Definition: Disks.h:78
#define assert(EX)
Definition: assert.h:19
static int64_t objectSizeForDirSelection(const StoreEntry &entry)
Definition: Disks.cc:34
YesNoNone memShared
whether the memory cache is shared among workers
Definition: SquidConfig.h:86
void updateLimits()
slowly calculate (and cache) hi/lo watermarks and similar limits
Definition: Disks.cc:366
bool hasReadableEntry(const StoreEntry &) const
whether any of disk caches has entry with e.key
Definition: Disks.cc:569
bool canLog(StoreEntry const &e) const
Definition: Disk.cc:188
virtual uint64_t currentCount() const override
the total number of objects stored right now
Definition: Disks.cc:347
const char * swap_log_op_str[]
hash_table * store_table
#define DBG_PARSE_NOTE(x)
Definition: Debug.h:50
static int store_dirs_rebuilding
the number of cache_dirs being rebuilt; TODO: move to Disks::Rebuilding
Definition: Controller.h:139
void storeDirCloseSwapLogs()
Definition: Disks.cc:585
void storeDirSwapLog(const StoreEntry *e, int op)
Definition: Disks.cc:741
virtual uint64_t minSize() const override
the minimum size the store will shrink to via normal housekeeping
Definition: Disks.cc:321
virtual int writeCleanStart()
Definition: Disk.cc:218
virtual int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
Definition: Disks.cc:360
virtual uint64_t maxSize() const override
Definition: Controller.cc:163
int storeDirWriteCleanLogs(int reopen)
Definition: Disks.cc:601
virtual void writeCleanDone()
Definition: Disk.cc:224
struct timeval current_time
Definition: stub_time.cc:15
static STDIRSELECT storeDirSelectSwapDirLeastLoad
Definition: Disks.cc:24
HASHHASH storeKeyHashHash
virtual uint64_t currentSize() const =0
current size
#define Must(condition)
Like assert() but throws an exception instead of aborting the process.
Definition: TextException.h:69
virtual StoreEntry * get(const cache_key *) override
Definition: Disks.cc:219
CleanLog * cleanLog
Definition: Disk.h:140
const char * storeKeyText(const cache_key *key)
int index
Definition: Disk.h:103
int64_t avgObjectSize
Definition: SquidConfig.h:268
SQUIDCEXTERN hash_table * hash_create(HASHCMP *, int, HASHHASH *)
Definition: hash.cc:109
Store::Disk & disk() const
the disk this entry is [being] cached on; asserts for entries w/o a disk
Definition: store.cc:1981
virtual bool dereference(StoreEntry &e) override
Definition: Disks.cc:462
virtual void write(StoreEntry const &)=0
int store_hash_buckets
virtual bool updateAnchored(StoreEntry &) override
Definition: Disks.cc:549
void storeDirOpenSwapLogs()
Definition: Disks.cc:578
virtual uint64_t currentSize() const override
current size
Definition: Disks.cc:334
struct SquidConfig::@110 Store
STDIRSELECT * storeDirSelectSwapDir
Definition: Disks.cc:29
virtual const StoreEntry * nextEntry()=0
virtual void stat(StoreEntry &) const override
Definition: Disks.cc:443
int storeKeyHashBuckets(int nbuckets)
void allocate_new_swapdir(Store::DiskConfig *swap)
Definition: Disks.cc:689
size_t swap_hdr_sz
Definition: MemObject.h:199
const char * getMD5Text() const
Definition: store.cc:205
High-level store statistics used by mgr:info action. Used inside PODs!
Definition: StoreStats.h:13
virtual bool anchorToCache(StoreEntry &, bool &)
Definition: Controlled.h:40
class SquidConfig Config
Definition: SquidConfig.cc:12
virtual void evictCached(StoreEntry &e)=0
sfileno swap_filen
unique ID inside a cache_dir for swapped out entries; -1 for others
Definition: Store.h:228
Controller & Root()
safely access controller singleton
Definition: Controller.cc:938
RefCount< SwapDir > * swapDirs
Definition: SquidConfig.h:65

 

Introduction

Documentation

Support

Miscellaneous

Web Site Translations

Mirrors