Controller.cc
1 /*
2  * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 20 Store Controller */
10 
11 #include "squid.h"
12 #include "mem_node.h"
13 #include "MemStore.h"
14 #include "SquidConfig.h"
15 #include "SquidMath.h"
16 #include "store/Controller.h"
17 #include "store/Disks.h"
18 #include "store/forward.h"
19 #include "store/LocalSearch.h"
20 #include "tools.h"
21 #include "Transients.h"
22 
23 #if HAVE_SYS_WAIT_H
24 #include <sys/wait.h>
25 #endif
26 
27 /*
28  * store_dirs_rebuilding is initialized to _1_ as a hack so that
29  * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
30  * cache_dirs have been read. For example, without this hack, Squid
31  * will try to write clean log files if -kparse fails (because it
32  * calls fatal()).
33  */
34 int Store::Controller::store_dirs_rebuilding = 1;
35 
36 Store::Controller::Controller() :
37  disks(new Disks),
38  sharedMemStore(nullptr),
39  localMemStore(false),
40  transients(nullptr)
41 {
42  assert(!store_table);
43 }
44 
46 Store::Controller::~Controller()
47 {
48  // assert at runtime because we cannot `= delete` an overridden destructor
49  assert(!"Controller is never destroyed");
50 }
51 
52 void
53 Store::Controller::init()
54 {
55  if (IamWorkerProcess()) {
56  if (MemStore::Enabled()) {
57  sharedMemStore = new MemStore;
58  sharedMemStore->init();
59  } else if (Config.memMaxSize > 0) {
60  localMemStore = true;
61  }
62  }
63 
64  disks->init();
65 
66  if (Transients::Enabled()) {
67  transients = new Transients;
68  transients->init();
69  }
70 }
71 
72 void
73 Store::Controller::create()
74 {
75  disks->create();
76 
77 #if !_SQUID_WINDOWS_
78  pid_t pid;
79  do {
80  PidStatus status;
81  pid = WaitForAnyPid(status, WNOHANG);
82  } while (pid > 0 || (pid < 0 && errno == EINTR));
83 #endif
84 }
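create() ends by reaping any helper children that have already exited during cache_dir creation so they do not linger as zombies; the loop never blocks because of WNOHANG. A minimal standalone sketch of the same reaping idiom using plain POSIX waitpid() instead of Squid's WaitForAnyPid() wrapper (illustration only, not part of Controller.cc):

    #include <cerrno>
    #include <sys/wait.h>

    // Collect every child that has already exited; return at once if none has.
    static void reapFinishedChildren()
    {
        pid_t pid;
        do {
            int status = 0;
            pid = waitpid(-1, &status, WNOHANG); // -1: any child; WNOHANG: never block
        } while (pid > 0 || (pid < 0 && errno == EINTR));
    }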
85 
86 void
87 Store::Controller::maintain()
88 {
89  static time_t last_warn_time = 0;
90 
91  disks->maintain();
92 
93  /* this should be emitted by the oversize dir, not globally */
94 
95  if (Root().currentSize() > Store::Root().maxSize()) {
96  if (squid_curtime - last_warn_time > 10) {
97  debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
98  << Store::Root().currentSize() / 1024.0 << " KB > "
99  << (Store::Root().maxSize() >> 10) << " KB");
100  last_warn_time = squid_curtime;
101  }
102  }
103 }
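The over-limit warning above is throttled with a function-local static so that it fires at most once every 10 seconds no matter how often maintain() runs. A standalone sketch of that throttling idiom with hypothetical names (not Squid code):

    #include <ctime>
    #include <iostream>

    // Emit a warning at most once per ten-second window.
    static void warnAboutOveruse(const char *details)
    {
        static std::time_t lastWarn = 0;
        const std::time_t now = std::time(nullptr);
        if (now - lastWarn > 10) {
            std::cerr << "WARNING: " << details << std::endl;
            lastWarn = now;
        }
    }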
104 
105 void
106 Store::Controller::getStats(StoreInfoStats &stats) const
107 {
108  if (sharedMemStore)
109  sharedMemStore->getStats(stats);
110  else {
111  // move this code to a non-shared memory cache class when we have it
112  stats.mem.shared = false;
113  stats.mem.capacity = Config.memMaxSize;
114  stats.mem.size = mem_node::StoreMemSize();
115  if (localMemStore) {
116  // XXX: also count internal/in-transit objects
117  stats.mem.count = hot_obj_count;
118  } else {
119  // XXX: count internal/in-transit objects instead
120  stats.mem.count = hot_obj_count;
121  }
122  }
123 
124  disks->getStats(stats);
125 
126  // low-level info not specific to memory or disk cache
127  stats.store_entry_count = StoreEntry::inUseCount();
128  stats.mem_object_count = MemObject::inUseCount();
129 }
130 
131 void
132 Store::Controller::stat(StoreEntry &output) const
133 {
134  storeAppendPrintf(&output, "Store Directory Statistics:\n");
135  storeAppendPrintf(&output, "Store Entries : %lu\n",
136  (unsigned long int)StoreEntry::inUseCount());
137  storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
138  maxSize() >> 10);
139  storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
140  currentSize() / 1024.0);
141  storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
142  Math::doublePercent(currentSize(), maxSize()),
143  Math::doublePercent((maxSize() - currentSize()), maxSize()));
144 
145  if (sharedMemStore)
146  sharedMemStore->stat(output);
147 
148  disks->stat(output);
149 }
150 
151 /* if needed, this could be taught to cache the result */
152 uint64_t
153 Store::Controller::maxSize() const
154 {
155  /* TODO: include memory cache ? */
156  return disks->maxSize();
157 }
158 
159 uint64_t
160 Store::Controller::minSize() const
161 {
162  /* TODO: include memory cache ? */
163  return disks->minSize();
164 }
165 
166 uint64_t
167 Store::Controller::currentSize() const
168 {
169  /* TODO: include memory cache ? */
170  return disks->currentSize();
171 }
172 
173 uint64_t
174 Store::Controller::currentCount() const
175 {
176  /* TODO: include memory cache ? */
177  return disks->currentCount();
178 }
179 
180 int64_t
181 Store::Controller::maxObjectSize() const
182 {
183  /* TODO: include memory cache ? */
184  return disks->maxObjectSize();
185 }
186 
187 void
188 Store::Controller::configure()
189 {
190  disks->configure();
191 
192  store_swap_high = (long) (((float) maxSize() *
193  (float) Config.Swap.highWaterMark) / (float) 100);
194  store_swap_low = (long) (((float) maxSize() *
195  (float) Config.Swap.lowWaterMark) / (float) 100);
196  store_pages_max = Config.memMaxSize / sizeof(mem_node);
197 
198  // TODO: move this into a memory cache class when we have one
199  const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
200  const int64_t disksMax = disks->maxObjectSize();
201  store_maxobjsize = std::max(disksMax, memMax);
202 }
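configure() recomputes the global swap watermarks from the cache_swap_low/cache_swap_high percentages and derives store_maxobjsize from the larger of what the disks accept and what the memory cache accepts. A worked example of the same arithmetic with hypothetical limits (100 GB of cache_dir capacity, watermarks at 90%/95%, a 4 MB in-memory object cap, a 256 MB memory cache, and a 1 GB disk object limit), using integer math for brevity:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        const uint64_t maxSize = 100ULL << 30;        // assumed total cache_dir capacity
        const uint64_t swapHigh = maxSize * 95 / 100; // above this, eviction becomes aggressive
        const uint64_t swapLow = maxSize * 90 / 100;  // eviction aims back down to this level

        const int64_t memMax = std::min<int64_t>(4 << 20, 256 << 20); // maxInMemObjSize vs memMaxSize
        const int64_t disksMax = 1LL << 30;                           // largest object any cache_dir takes
        const int64_t storeMaxObjSize = std::max(disksMax, memMax);   // largest object the Store can cache

        std::cout << swapLow << ' ' << swapHigh << ' ' << storeMaxObjSize << '\n';
    }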
203 
204 StoreSearch *
205 Store::Controller::search()
206 {
207  // this is the only kind of search we currently support
208  return NewLocalSearch();
209 }
210 
211 void
212 Store::Controller::sync()
213 {
214  if (sharedMemStore)
215  sharedMemStore->sync();
216  disks->sync();
217 }
218 
219 /*
220  * handle callbacks all available fs'es
221  */
222 int
223 Store::Controller::callback()
224 {
225  /* mem cache callbacks ? */
226  return disks->callback();
227 }
228 
230 void
231 Store::Controller::referenceBusy(StoreEntry &e)
232 {
233  // special entries do not belong to any specific Store, but are IN_MEMORY
234  if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
235  return;
236 
237  /* Notify the fs that we're referencing this object again */
238 
239  if (e.hasDisk())
240  disks->reference(e);
241 
242  // Notify the memory cache that we're referencing this object again
243  if (sharedMemStore && e.mem_status == IN_MEMORY)
244  sharedMemStore->reference(e);
245 
246  // TODO: move this code to a non-shared memory cache class when we have it
247  if (e.mem_obj) {
248  if (mem_policy->Referenced)
249  mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
250  }
251 }
252 
255 bool
256 Store::Controller::dereferenceIdle(StoreEntry &e, bool wantsLocalMemory)
257 {
258  // special entries do not belong to any specific Store, but are IN_MEMORY
259  if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
260  return true;
261 
262  // idle private entries cannot be reused
263  if (EBIT_TEST(e.flags, KEY_PRIVATE))
264  return false;
265 
266  bool keepInStoreTable = false; // keep only if somebody needs it there
267 
268  // Notify the fs that we are not referencing this object any more. This
269  // should be done even if we overwrite keepInStoreTable afterwards.
270 
271  if (e.hasDisk())
272  keepInStoreTable = disks->dereference(e) || keepInStoreTable;
273 
274  // Notify the memory cache that we're not referencing this object any more
275  if (sharedMemStore && e.mem_status == IN_MEMORY)
276  keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;
277 
278  // TODO: move this code to a non-shared memory cache class when we have it
279  if (e.mem_obj) {
280  if (mem_policy->Dereferenced)
281  mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
282  // non-shared memory cache relies on store_table
283  if (localMemStore)
284  keepInStoreTable = wantsLocalMemory || keepInStoreTable;
285  }
286 
287  if (e.hittingRequiresCollapsing()) {
288  // If we were writing this now-locally-idle entry, then we did not
289  // finish and should now destroy an incomplete entry. Otherwise, do not
290  // leave this idle StoreEntry behind because handleIMSReply() lacks
291  // freshness checks when hitting a collapsed revalidation entry.
292  keepInStoreTable = false; // may overrule fs decisions made above
293  }
294 
295  return keepInStoreTable;
296 }
297 
298 bool
299 Store::Controller::markedForDeletion(const cache_key *key) const
300 {
301  // assuming a public key, checking Transients should cover all cases.
302  return transients && transients->markedForDeletion(key);
303 }
304 
305 bool
306 Store::Controller::markedForDeletionAndAbandoned(const StoreEntry &e) const
307 {
308  // The opposite check order could miss a reader that has arrived after the
309  // !readers() and before the markedForDeletion() check.
310  return markedForDeletion(reinterpret_cast<const cache_key*>(e.key)) &&
311  transients && !transients->readers(e);
312 }
313 
314 bool
315 Store::Controller::hasReadableDiskEntry(const StoreEntry &e) const
316 {
317  return disks->hasReadableEntry(e);
318 }
319 
321 void
322 Store::Controller::checkFoundCandidate(const StoreEntry &entry) const
323 {
324  checkTransients(entry);
325 
326  // The "hittingRequiresCollapsing() has an active writer" checks below
327  // protect callers from getting stuck and/or from using a stale revalidation
328  // reply. However, these protections are not reliable because the writer may
329  // disappear at any time and/or without a trace. Collapsing adds risks...
330  if (entry.hittingRequiresCollapsing()) {
331  if (entry.hasTransients()) {
332  // Too late to check here because the writer may be gone by now, but
333  // Transients do check when they setCollapsingRequirement().
334  } else {
335  // a local writer must hold a lock on its writable entry
336  if (!(entry.locked() && entry.isAccepting()))
337  throw TextException("no local writer", Here());
338  }
339  }
340 }
341 
342 StoreEntry *
343 Store::Controller::find(const cache_key *key)
344 {
345  if (const auto entry = peek(key)) {
346  try {
347  if (!entry->key)
348  allowSharing(*entry, key);
349  checkFoundCandidate(*entry);
350  entry->touch();
351  referenceBusy(*entry);
352  return entry;
353  } catch (const std::exception &ex) {
354  debugs(20, 2, "failed with " << *entry << ": " << ex.what());
355  entry->release();
356  // fall through
357  }
358  }
359  return nullptr;
360 }
361 
363 void
364 Store::Controller::allowSharing(StoreEntry &entry, const cache_key *key)
365 {
366  // anchorToCache() below and many find() callers expect a registered entry
367  addReading(&entry, key);
368 
369  if (entry.hasTransients()) {
370  // store hadWriter before computing `found`; \see Transients::get()
371  const auto hadWriter = transients->hasWriter(entry);
372  const auto found = anchorToCache(entry);
373  if (!found) {
374  // !found should imply hittingRequiresCollapsing() regardless of writer presence
375  if (!entry.hittingRequiresCollapsing()) {
376  debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: missing ENTRY_REQUIRES_COLLAPSING for " << entry);
377  throw TextException("transients entry missing ENTRY_REQUIRES_COLLAPSING", Here());
378  }
379 
380  if (!hadWriter) {
381  // prevent others from falling into the same trap
382  throw TextException("unattached transients entry missing writer", Here());
383  }
384  }
385  }
386 }
387 
388 StoreEntry *
389 Store::Controller::findCallbackXXX(const cache_key *key)
390 {
391  // We could check for mem_obj presence (and more), moving and merging some
392  // of the duplicated neighborsUdpAck() and neighborsHtcpReply() code here,
393  // but that would mean polluting Store with HTCP/ICP code. Instead, we
394  // should encapsulate callback-related data in a protocol-neutral MemObject
395  // member or use an HTCP/ICP-specific index rather than store_table.
396 
397  // cannot reuse peekAtLocal() because HTCP/ICP callbacks may use private keys
398  return static_cast<StoreEntry*>(hash_lookup(store_table, key));
399 }
400 
404 StoreEntry *
405 Store::Controller::peekAtLocal(const cache_key *key)
406 {
407  if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
408  // callers must only search for public entries
409  assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
410  assert(e->publicKey());
411  checkTransients(*e);
412 
413  // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
414  // because their backing store slot may be gone already.
415  return e;
416  }
417  return nullptr;
418 }
419 
420 StoreEntry *
421 Store::Controller::peek(const cache_key *key)
422 {
423  debugs(20, 3, storeKeyText(key));
424 
425  if (markedForDeletion(key)) {
426  debugs(20, 3, "ignoring marked in-transit " << storeKeyText(key));
427  return nullptr;
428  }
429 
430  if (StoreEntry *e = peekAtLocal(key)) {
431  debugs(20, 3, "got local in-transit entry: " << *e);
432  return e;
433  }
434 
435  // Must search transients before caches because we must sync those we find.
436  if (transients) {
437  if (StoreEntry *e = transients->get(key)) {
438  debugs(20, 3, "got shared in-transit entry: " << *e);
439  return e;
440  }
441  }
442 
443  if (sharedMemStore) {
444  if (StoreEntry *e = sharedMemStore->get(key)) {
445  debugs(20, 3, "got mem-cached entry: " << *e);
446  return e;
447  }
448  }
449 
450  if (const auto e = disks->get(key)) {
451  debugs(20, 3, "got disk-cached entry: " << *e);
452  return e;
453  }
454 
455  debugs(20, 4, "cannot locate " << storeKeyText(key));
456  return nullptr;
457 }
458 
459 bool
460 Store::Controller::transientsReader(const StoreEntry &e) const
461 {
462  return transients && e.hasTransients() && transients->isReader(e);
463 }
464 
465 bool
466 Store::Controller::transientsWriter(const StoreEntry &e) const
467 {
468  return transients && e.hasTransients() && transients->isWriter(e);
469 }
470 
471 int64_t
472 Store::Controller::accumulateMore(StoreEntry &entry) const
473 {
474  return disks->accumulateMore(entry);
475  // The memory cache should not influence for-swapout accumulation decision.
476 }
477 
478 // Must be called from StoreEntry::release() or releaseRequest() because
479 // those methods currently manage local indexing of StoreEntry objects.
480 // TODO: Replace StoreEntry::release*() with Root().evictCached().
481 void
482 Store::Controller::evictCached(StoreEntry &e)
483 {
484  debugs(20, 7, e);
485  if (transients)
486  transients->evictCached(e);
487  memoryEvictCached(e);
488  disks->evictCached(e);
489 }
490 
491 void
492 Store::Controller::evictIfFound(const cache_key *key)
493 {
494  debugs(20, 7, storeKeyText(key));
495 
496  if (StoreEntry *entry = peekAtLocal(key)) {
497  debugs(20, 5, "marking local in-transit " << *entry);
498  entry->release(true);
499  return;
500  }
501 
502  if (sharedMemStore)
503  sharedMemStore->evictIfFound(key);
504 
505  disks->evictIfFound(key);
506 
507  if (transients)
508  transients->evictIfFound(key);
509 }
510 
512 bool
513 Store::Controller::memoryCacheHasSpaceFor(const int pagesRequired) const
514 {
515  // XXX: We count mem_nodes but may free shared memory pages instead.
516  const auto fits = mem_node::InUseCount() + pagesRequired <= store_pages_max;
517  debugs(20, 7, fits << ": " << mem_node::InUseCount() << '+' << pagesRequired << '?' << store_pages_max);
518  return fits;
519 }
520 
521 void
522 Store::Controller::freeMemorySpace(const int bytesRequired)
523 {
524  const auto pagesRequired = (bytesRequired + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
525 
526  if (memoryCacheHasSpaceFor(pagesRequired))
527  return;
528 
529  // XXX: When store_pages_max is smaller than pagesRequired, we should not
530  // look for more space (but we do because we want to abandon idle entries?).
531 
532  // limit our performance impact to one walk per second
533  static time_t lastWalk = 0;
534  if (lastWalk == squid_curtime)
535  return;
536  lastWalk = squid_curtime;
537 
538  debugs(20, 2, "need " << pagesRequired << " pages");
539 
540  // let abandon()/handleIdleEntry() know about the impeding memory shortage
541  memoryPagesDebt_ = pagesRequired;
542 
543  // XXX: SMP-unaware: Walkers should iterate memory cache, not store_table.
544  // XXX: Limit iterations by time, not arbitrary count.
545  const auto walker = mem_policy->PurgeInit(mem_policy, 100000);
546  int removed = 0;
547  while (const auto entry = walker->Next(walker)) {
548  // Abandoned memory cache entries are purged during memory shortage.
549  entry->abandon(__func__); // may delete entry
550  ++removed;
551 
552  if (memoryCacheHasSpaceFor(pagesRequired))
553  break;
554  }
555  // TODO: Move to RemovalPolicyWalker::Done() that has more/better details.
556  debugs(20, 3, "removed " << removed << " out of " << hot_obj_count << " memory-cached entries");
557  walker->Done(walker);
558  memoryPagesDebt_ = 0;
559 }
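freeMemorySpace() converts the byte request into whole SM_PAGE_SIZE pages with a ceiling division before checking memoryCacheHasSpaceFor(), and the purge walk itself runs at most once per second. A standalone sketch of the rounding step, assuming a hypothetical 4 KB page size:

    #include <cstdio>

    // Round a byte count up to whole pages, mirroring the SM_PAGE_SIZE arithmetic above.
    static int pagesFor(const int bytesRequired, const int pageSize)
    {
        return (bytesRequired + pageSize - 1) / pageSize;
    }

    int main()
    {
        // 1 byte -> 1 page, 4096 bytes -> 1 page, 4097 bytes -> 2 pages.
        std::printf("%d %d %d\n", pagesFor(1, 4096), pagesFor(4096, 4096), pagesFor(4097, 4096));
    }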
560 
561 // move this into [non-shared] memory cache class when we have one
563 bool
564 Store::Controller::keepForLocalMemoryCache(StoreEntry &e) const
565 {
566  if (!e.memoryCachable())
567  return false;
568 
569  // does the current and expected size obey memory caching limits?
570  assert(e.mem_obj);
571  const int64_t loadedSize = e.mem_obj->endOffset();
572  const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
573  const int64_t ramSize = max(loadedSize, expectedSize);
574  const int64_t ramLimit = min(
575  static_cast<int64_t>(Config.memMaxSize),
576  static_cast<int64_t>(Config.Store.maxInMemObjSize));
577  return ramSize <= ramLimit;
578 }
579 
580 void
581 Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
582 {
583  bool keepInLocalMemory = false;
584  if (sharedMemStore)
585  sharedMemStore->write(e); // leave keepInLocalMemory false
586  else if (localMemStore)
587  keepInLocalMemory = keepForLocalMemoryCache(e);
588 
589  debugs(20, 7, "keepInLocalMemory: " << keepInLocalMemory);
590 
591  if (!keepInLocalMemory)
592  e.trimMemory(preserveSwappable);
593 }
594 
597 void
598 Store::Controller::memoryEvictCached(StoreEntry &e)
599 {
600  // TODO: Untangle memory caching from mem_obj.
601  if (sharedMemStore)
602  sharedMemStore->evictCached(e);
603  else // TODO: move into [non-shared] memory cache class when we have one
604  if (!e.locked())
605  e.destroyMemObject();
606 }
607 
608 void
609 Store::Controller::memoryDisconnect(StoreEntry &e)
610 {
611  if (sharedMemStore)
612  sharedMemStore->disconnect(e);
613  // else nothing to do for non-shared memory cache
614 }
615 
616 void
617 Store::Controller::noteStoppedSharedWriting(StoreEntry &e)
618 {
619  if (transients && e.hasTransients()) // paranoid: the caller should check
620  transients->completeWriting(e);
621 }
622 
623 int
624 Store::Controller::transientReaders(const StoreEntry &e) const
625 {
626  return (transients && e.hasTransients()) ?
627  transients->readers(e) : 0;
628 }
629 
630 void
631 Store::Controller::transientsDisconnect(StoreEntry &e)
632 {
633  if (transients)
634  transients->disconnect(e);
635 }
636 
637 void
638 Store::Controller::handleIdleEntry(StoreEntry &e)
639 {
640  bool keepInLocalMemory = false;
641 
642  if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
643  // Icons (and cache digests?) should stay in store_table until we
644  // have a dedicated storage for them (that would not purge them).
645  // They are not managed [well] by any specific Store handled below.
646  keepInLocalMemory = true;
647  } else if (sharedMemStore) {
648  // leave keepInLocalMemory false; sharedMemStore maintains its own cache
649  } else if (localMemStore) {
650  keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
651  // the local memory cache is not overflowing
652  memoryCacheHasSpaceFor(memoryPagesDebt_);
653  }
654 
655  // An idle, unlocked entry that only belongs to a SwapDir which controls
656  // its own index, should not stay in the global store_table.
657  if (!dereferenceIdle(e, keepInLocalMemory)) {
658  debugs(20, 5, "destroying unlocked entry: " << &e << ' ' << e);
659  destroyStoreEntry(static_cast<hash_link*>(&e));
660  return;
661  }
662 
663  debugs(20, 5, "keepInLocalMemory: " << keepInLocalMemory);
664 
665  // formerly known as "WARNING: found KEY_PRIVATE"
666  assert(!EBIT_TEST(e.flags, KEY_PRIVATE));
667 
668  // TODO: move this into [non-shared] memory cache class when we have one
669  if (keepInLocalMemory) {
670  e.setMemStatus(IN_MEMORY);
671  e.mem_obj->unlinkRequest();
672  return;
673  }
674 
675  // We know the in-memory data will be gone. Get rid of the entire entry if
676  // it has nothing worth preserving on disk either.
677  if (!e.swappedOut()) {
678  e.release(); // deletes e
679  return;
680  }
681 
682  memoryEvictCached(e); // may already be gone
683  // and keep the entry in store_table for its on-disk data
684 }
685 
686 bool
687 Store::Controller::updateOnNotModified(StoreEntry *old, StoreEntry &e304)
688 {
689  Must(old);
690  Must(old->mem_obj);
691  Must(e304.mem_obj);
692 
693  // updateOnNotModified() may be called many times for the same old entry.
694  // e304.mem_obj->appliedUpdates value distinguishes two cases:
695  // false: Independent store clients revalidating the same old StoreEntry.
696  // Each such update uses its own e304. The old StoreEntry
697  // accumulates such independent updates.
698  // true: Store clients feeding off the same 304 response. Each such update
699  // uses the same e304. For timestamps correctness and performance
700  // sake, it is best to detect and skip such repeated update calls.
701  if (e304.mem_obj->appliedUpdates) {
702  debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
703  return true;
704  }
705  e304.mem_obj->appliedUpdates = true;
706 
707  try {
708  if (!old->updateOnNotModified(e304)) {
709  debugs(20, 5, "updated nothing in " << *old << " with " << e304);
710  return true;
711  }
712  } catch (...) {
713  debugs(20, DBG_IMPORTANT, "ERROR: Failed to update a cached response: " << CurrentException);
714  return false;
715  }
716 
717  if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
718  sharedMemStore->updateHeaders(old);
719 
720  if (old->swap_dirn > -1)
721  disks->updateHeaders(old);
722 
723  return true;
724 }
725 
726 bool
727 Store::Controller::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
728  const HttpRequestMethod &)
729 {
730  const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
731  // set the flag now so that it gets copied into the Transients entry
732  e->setCollapsingRequirement(true);
733  if (e->makePublic(keyScope)) { // this is needed for both local and SMP collapsing
734  debugs(20, 3, "may " << (transients && e->hasTransients() ?
735  "SMP-" : "locally-") << "collapse " << *e);
736  assert(e->hittingRequiresCollapsing());
737  return true;
738  }
739  // paranoid cleanup; the flag is meaningless for private entries
740  e->setCollapsingRequirement(false);
741  return false;
742 }
743 
744 void
745 Store::Controller::addReading(StoreEntry *e, const cache_key *key)
746 {
747  if (transients)
748  transients->monitorIo(e, key, Store::ioReading);
749  e->hashInsert(key);
750 }
751 
752 void
753 Store::Controller::addWriting(StoreEntry *e, const cache_key *key)
754 {
755  assert(e);
756  if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
757  return; // constant memory-resident entries do not need transients
758 
759  if (transients)
760  transients->monitorIo(e, key, Store::ioWriting);
761  // else: non-SMP configurations do not need transients
762 }
763 
764 void
765 Store::Controller::syncCollapsed(const sfileno xitIndex)
766 {
767  assert(transients);
768 
769  StoreEntry *collapsed = transients->findCollapsed(xitIndex);
770  if (!collapsed) { // the entry is no longer active, ignore update
771  debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
772  return;
773  }
774 
775  if (!collapsed->locked()) {
776  debugs(20, 3, "skipping (and may destroy) unlocked " << *collapsed);
777  handleIdleEntry(*collapsed);
778  return;
779  }
780 
781  assert(collapsed->mem_obj);
782 
783  if (EBIT_TEST(collapsed->flags, ENTRY_ABORTED)) {
784  debugs(20, 3, "skipping already aborted " << *collapsed);
785  return;
786  }
787 
788  debugs(20, 7, "syncing " << *collapsed);
789 
790  Transients::EntryStatus entryStatus;
791  transients->status(*collapsed, entryStatus);
792 
793  if (entryStatus.waitingToBeFreed) {
794  debugs(20, 3, "will release " << *collapsed << " due to waitingToBeFreed");
795  collapsed->release(true); // may already be marked
796  }
797 
798  if (transients->isWriter(*collapsed))
799  return; // readers can only change our waitingToBeFreed flag
800 
801  assert(transients->isReader(*collapsed));
802 
803  bool found = false;
804  bool inSync = false;
805  if (sharedMemStore && collapsed->mem_obj->memCache.io == Store::ioDone) {
806  found = true;
807  inSync = true;
808  debugs(20, 7, "already handled by memory store: " << *collapsed);
809  } else if (sharedMemStore && collapsed->hasMemStore()) {
810  found = true;
811  inSync = sharedMemStore->updateAnchored(*collapsed);
812  // TODO: handle entries attached to both memory and disk
813  } else if (collapsed->hasDisk()) {
814  found = true;
815  inSync = disks->updateAnchored(*collapsed);
816  } else {
817  try {
818  found = anchorToCache(*collapsed);
819  inSync = found;
820  } catch (...) {
821  // TODO: Write an exception handler for the entire method.
822  debugs(20, 3, "anchorToCache() failed for " << *collapsed << ": " << CurrentException);
823  collapsed->abort();
824  return;
825  }
826  }
827 
828  if (entryStatus.waitingToBeFreed && !found) {
829  debugs(20, 3, "aborting unattached " << *collapsed <<
830  " because it was marked for deletion before we could attach it");
831  collapsed->abort();
832  return;
833  }
834 
835  if (inSync) {
836  debugs(20, 5, "synced " << *collapsed);
837  assert(found);
838  collapsed->setCollapsingRequirement(false);
839  collapsed->invokeHandlers();
840  return;
841  }
842 
843  if (found) { // unrecoverable problem syncing this entry
844  debugs(20, 3, "aborting unsyncable " << *collapsed);
845  collapsed->abort();
846  return;
847  }
848 
849  if (!entryStatus.hasWriter) {
850  debugs(20, 3, "aborting abandoned-by-writer " << *collapsed);
851  collapsed->abort();
852  return;
853  }
854 
855  // the entry is still not in one of the caches
856  debugs(20, 7, "waiting " << *collapsed);
857  collapsed->setCollapsingRequirement(true);
858 }
859 
863 bool
864 Store::Controller::anchorToCache(StoreEntry &entry)
865 {
866  assert(entry.hasTransients());
867  assert(transientsReader(entry));
868 
869  // TODO: Attach entries to both memory and disk
870 
871  // TODO: Reduce code duplication with syncCollapsed()
872  if (sharedMemStore && entry.mem().memCache.io == Store::ioDone) {
873  debugs(20, 5, "already handled by memory store: " << entry);
874  return true;
875  } else if (sharedMemStore && entry.hasMemStore()) {
876  debugs(20, 5, "already anchored to memory store: " << entry);
877  return true;
878  } else if (entry.hasDisk()) {
879  debugs(20, 5, "already anchored to disk: " << entry);
880  return true;
881  }
882 
883  debugs(20, 7, "anchoring " << entry);
884 
885  Transients::EntryStatus entryStatus;
886  transients->status(entry, entryStatus);
887 
888  bool found = false;
889  if (sharedMemStore)
890  found = sharedMemStore->anchorToCache(entry);
891  if (!found)
892  found = disks->anchorToCache(entry);
893 
894  if (found) {
895  debugs(20, 7, "anchored " << entry);
896  entry.setCollapsingRequirement(false);
897  return true;
898  }
899 
900  if (entryStatus.waitingToBeFreed)
901  throw TextException("will never be able to anchor to an already marked entry", Here());
902 
903  if (!entryStatus.hasWriter)
904  throw TextException("will never be able to anchor to an abandoned-by-writer entry", Here());
905 
906  debugs(20, 7, "skipping not yet cached " << entry);
907  entry.setCollapsingRequirement(true);
908  return false;
909 }
910 
911 bool
912 Store::Controller::SmpAware()
913 {
914  return MemStore::Enabled() || Disks::SmpAware();
915 }
916 
917 void
918 Store::Controller::checkTransients(const StoreEntry &e) const
919 {
920  if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
921  return;
922  assert(!transients || e.hasTransients());
923 }
924 
925 Store::Controller&
926 Store::Root()
927 {
928  static const auto root = new Controller();
929  return *root;
930 }
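Root() hands out a function-local singleton that is built on first use and intentionally leaked, which is why the destructor near the top of this file can assert that it is never called. A standalone sketch of the same idiom with a hypothetical class (not Squid code):

    // The object is created on first call and deliberately never destroyed,
    // so references obtained from Instance() stay valid for the whole process.
    class Registry
    {
    public:
        static Registry &Instance()
        {
            static const auto instance = new Registry(); // leaked on purpose
            return *instance;
        }
    private:
        Registry() = default;
    };

    int main()
    {
        Registry &r = Registry::Instance(); // every caller shares this one object
        (void)r;
        return 0;
    }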
931 