Controller.cc
/*
 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Store Controller */

#include "squid.h"
#include "mem_node.h"
#include "MemStore.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store/Controller.h"
#include "store/Disks.h"
#include "store/LocalSearch.h"
#include "tools.h"
#include "Transients.h"

#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int Store::Controller::store_dirs_rebuilding = 1;

Store::Controller::Controller() :
    swapDir(new Disks),
    sharedMemStore(nullptr),
    localMemStore(false),
    transients(NULL)
{
    assert(!store_table);
}

Store::Controller::~Controller()
{
    delete sharedMemStore;
    delete transients;
    delete swapDir;

    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = nullptr;
    }
}

void
Store::Controller::init()
{
    if (IamWorkerProcess()) {
        if (MemStore::Enabled()) {
            sharedMemStore = new MemStore;
            sharedMemStore->init();
        } else if (Config.memMaxSize > 0) {
            localMemStore = true;
        }
    }

    swapDir->init();

    if (Transients::Enabled() && IamWorkerProcess()) {
        transients = new Transients;
        transients->init();
    }
}

void
Store::Controller::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_
    pid_t pid;
    do {
        PidStatus status;
        pid = WaitForAnyPid(status, WNOHANG);
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif
}

void
Store::Controller::maintain()
{
    static time_t last_warn_time = 0;

    swapDir->maintain();

    /* this should be emitted by the oversize dir, not globally */

    if (Root().currentSize() > Store::Root().maxSize()) {
        if (squid_curtime - last_warn_time > 10) {
            debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
                   << Store::Root().currentSize() / 1024.0 << " KB > "
                   << (Store::Root().maxSize() >> 10) << " KB");
            last_warn_time = squid_curtime;
        }
    }
}

void
Store::Controller::getStats(StoreInfoStats &stats) const
{
    if (sharedMemStore)
        sharedMemStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        if (localMemStore) {
            // XXX: also count internal/in-transit objects
            stats.mem.count = hot_obj_count;
        } else {
            // XXX: count internal/in-transit objects instead
            stats.mem.count = hot_obj_count;
        }
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
Store::Controller::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (sharedMemStore)
        sharedMemStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
Store::Controller::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
Store::Controller::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
Store::Controller::currentSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->currentSize();
}

uint64_t
Store::Controller::currentCount() const
{
    /* TODO: include memory cache ? */
    return swapDir->currentCount();
}

int64_t
Store::Controller::maxObjectSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxObjectSize();
}

void
Store::Controller::configure()
{
    swapDir->configure();

    store_swap_high = (long) (((float) maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);

    // TODO: move this into a memory cache class when we have one
    const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
    const int64_t disksMax = swapDir ? swapDir->maxObjectSize() : 0;
    store_maxobjsize = std::max(disksMax, memMax);
}
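
// Worked example (illustrative numbers only): with cache_swap_low 90 and
// cache_swap_high 95 (the usual defaults) and a combined maxSize() of 100 GB,
// configure() sets the global thresholds at roughly 95% and 90% of maxSize():
//
//   store_swap_high ~ 100 GB * 95 / 100
//   store_swap_low  ~ 100 GB * 90 / 100
//
// store_maxobjsize becomes the larger of the biggest disk-cachable object and
// the biggest memory-cachable object (the min of maximum_object_size_in_memory
// and cache_mem), so a small cache_mem cannot shrink the disk object limit.
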

StoreSearch *
Store::Controller::search()
{
    // this is the only kind of search we currently support
    return NewLocalSearch();
}

void
Store::Controller::sync()
{
    if (sharedMemStore)
        sharedMemStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available fs'es
 */
int
Store::Controller::callback()
{
    /* mem cache callbacks ? */
    return swapDir->callback();
}

/// update reference counters of the recently touched entry
void
Store::Controller::referenceBusy(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.hasDisk())
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        sharedMemStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

bool
Store::Controller::dereferenceIdle(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    // idle private entries cannot be reused
    if (EBIT_TEST(e.flags, KEY_PRIVATE))
        return false;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    // Notify the fs that we are not referencing this object any more. This
    // should be done even if we overwrite keepInStoreTable afterwards.

    if (e.hasDisk())
        keepInStoreTable = swapDir->dereference(e) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (localMemStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    if (e.hittingRequiresCollapsing()) {
        // If we were writing this now-locally-idle entry, then we did not
        // finish and should now destroy an incomplete entry. Otherwise, do not
        // leave this idle StoreEntry behind because handleIMSReply() lacks
        // freshness checks when hitting a collapsed revalidation entry.
        keepInStoreTable = false; // may overrule fs decisions made above
    }

    return keepInStoreTable;
}

bool
Store::Controller::markedForDeletion(const cache_key *key) const
{
    // assuming a public key, checking Transients should cover all cases.
    return transients && transients->markedForDeletion(key);
}

bool
Store::Controller::markedForDeletionAndAbandoned(const StoreEntry &e) const
{
    // The opposite check order could miss a reader that has arrived after the
    // !readers() and before the markedForDeletion() check.
    return markedForDeletion(reinterpret_cast<const cache_key*>(e.key)) &&
           transients && !transients->readers(e);
}

bool
Store::Controller::hasReadableDiskEntry(const StoreEntry &e) const
{
    return swapDir->hasReadableEntry(e);
}

/// flags problematic entries before find() commits to finalizing/returning them
void
Store::Controller::checkFoundCandidate(const StoreEntry &entry) const
{
    checkTransients(entry);

    // The "hittingRequiresCollapsing() has an active writer" checks below
    // protect callers from getting stuck and/or from using a stale revalidation
    // reply. However, these protections are not reliable because the writer may
    // disappear at any time and/or without a trace. Collapsing adds risks...
    if (entry.hittingRequiresCollapsing()) {
        if (entry.hasTransients()) {
            // Too late to check here because the writer may be gone by now, but
            // Transients do check when they setCollapsingRequirement().
        } else {
            // a local writer must hold a lock on its writable entry
            if (!(entry.locked() && entry.isAccepting()))
                throw TextException("no local writer", Here());
        }
    }
}

StoreEntry *
Store::Controller::find(const cache_key *key)
{
    if (const auto entry = peek(key)) {
        try {
            if (!entry->key)
                allowSharing(*entry, key);
            checkFoundCandidate(*entry);
            entry->touch();
            referenceBusy(*entry);
            return entry;
        } catch (const std::exception &ex) {
            debugs(20, 2, "failed with " << *entry << ": " << ex.what());
            entry->release();
            // fall through
        }
    }
    return NULL;
}

/// indexes and adds SMP-tracking for an ephemeral peek() result
void
Store::Controller::allowSharing(StoreEntry &entry, const cache_key *key)
{
    // TODO: refactor to throw on anchorToCache() inSync errors!

    // anchorToCache() below and many find() callers expect a registered entry
    addReading(&entry, key);

    if (entry.hasTransients()) {
        // store hadWriter before computing `found`; \see Transients::get()
        const auto hadWriter = transients->hasWriter(entry);
        bool inSync = false;
        const bool found = anchorToCache(entry, inSync);
        if (found && !inSync)
            throw TexcHere("cannot sync");
        if (!found) {
            // !found should imply hittingRequiresCollapsing() regardless of writer presence
            if (!entry.hittingRequiresCollapsing()) {
                debugs(20, DBG_IMPORTANT, "BUG: missing ENTRY_REQUIRES_COLLAPSING for " << entry);
                throw TextException("transients entry missing ENTRY_REQUIRES_COLLAPSING", Here());
            }

            if (!hadWriter) {
                // prevent others from falling into the same trap
                throw TextException("unattached transients entry missing writer", Here());
            }
        }
    }
}

StoreEntry *
Store::Controller::findCallbackXXX(const cache_key *key)
{
    // We could check for mem_obj presence (and more), moving and merging some
    // of the duplicated neighborsUdpAck() and neighborsHtcpReply() code here,
    // but that would mean polluting Store with HTCP/ICP code. Instead, we
    // should encapsulate callback-related data in a protocol-neutral MemObject
    // member or use an HTCP/ICP-specific index rather than store_table.

    // cannot reuse peekAtLocal() because HTCP/ICP callbacks may use private keys
    return static_cast<StoreEntry*>(hash_lookup(store_table, key));
}

StoreEntry *
Store::Controller::peekAtLocal(const cache_key *key)
{
    if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
        // callers must only search for public entries
        assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
        assert(e->publicKey());
        checkTransients(*e);

        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        return e;
    }
    return nullptr;
}

StoreEntry *
Store::Controller::peek(const cache_key *key)
{
    debugs(20, 3, storeKeyText(key));

    if (markedForDeletion(key)) {
        debugs(20, 3, "ignoring marked in-transit " << storeKeyText(key));
        return nullptr;
    }

    if (StoreEntry *e = peekAtLocal(key)) {
        debugs(20, 3, "got local in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            return e;
        }
    }

    if (sharedMemStore) {
        if (StoreEntry *e = sharedMemStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    if (swapDir) {
        if (StoreEntry *e = swapDir->get(key)) {
            debugs(20, 3, "got disk-cached entry: " << *e);
            return e;
        }
    }

    debugs(20, 4, "cannot locate " << storeKeyText(key));
    return nullptr;
}

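
// Lookup order recap (descriptive only): peek() consults, in order, the local
// store_table (already-indexed entries), the shared Transients table
// (in-transit entries from any worker), the shared memory cache, and finally
// the disk caches. find() then promotes a successful peek() result: it indexes
// and anchors unregistered entries via allowSharing(), vets them with
// checkFoundCandidate(), and bumps reference counters via referenceBusy();
// on any failure it releases the candidate and returns NULL.
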
bool
Store::Controller::transientsReader(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isReader(e);
}

bool
Store::Controller::transientsWriter(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isWriter(e);
}

int64_t
Store::Controller::accumulateMore(StoreEntry &entry) const
{
    return swapDir ? swapDir->accumulateMore(entry) : 0;
    // The memory cache should not influence for-swapout accumulation decision.
}

// Must be called from StoreEntry::release() or releaseRequest() because
// those methods currently manage local indexing of StoreEntry objects.
// TODO: Replace StoreEntry::release*() with Root().evictCached().
void
Store::Controller::evictCached(StoreEntry &e)
{
    debugs(20, 7, e);
    if (transients)
        transients->evictCached(e);
    memoryEvictCached(e);
    if (swapDir)
        swapDir->evictCached(e);
}

void
Store::Controller::evictIfFound(const cache_key *key)
{
    debugs(20, 7, storeKeyText(key));

    if (StoreEntry *entry = peekAtLocal(key)) {
        debugs(20, 5, "marking local in-transit " << *entry);
        entry->release(true);
        return;
    }

    if (sharedMemStore)
        sharedMemStore->evictIfFound(key);
    if (swapDir)
        swapDir->evictIfFound(key);
    if (transients)
        transients->evictIfFound(key);
}

/// whether the memory cache is allowed to store that many additional pages
bool
Store::Controller::memoryCacheHasSpaceFor(const int pagesRequired) const
{
    // XXX: We count mem_nodes but may free shared memory pages instead.
    const auto fits = mem_node::InUseCount() + pagesRequired <= store_pages_max;
    debugs(20, 7, fits << ": " << mem_node::InUseCount() << '+' << pagesRequired << '?' << store_pages_max);
    return fits;
}

void
Store::Controller::freeMemorySpace(const int bytesRequired)
{
    const auto pagesRequired = (bytesRequired + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (memoryCacheHasSpaceFor(pagesRequired))
        return;

    // XXX: When store_pages_max is smaller than pagesRequired, we should not
    // look for more space (but we do because we want to abandon idle entries?).

    // limit our performance impact to one walk per second
    static time_t lastWalk = 0;
    if (lastWalk == squid_curtime)
        return;
    lastWalk = squid_curtime;

    debugs(20, 2, "need " << pagesRequired << " pages");

    // let abandon()/handleIdleEntry() know about the impending memory shortage
    memoryPagesDebt_ = pagesRequired;

    // XXX: SMP-unaware: Walkers should iterate memory cache, not store_table.
    // XXX: Limit iterations by time, not arbitrary count.
    const auto walker = mem_policy->PurgeInit(mem_policy, 100000);
    int removed = 0;
    while (const auto entry = walker->Next(walker)) {
        // Abandoned memory cache entries are purged during memory shortage.
        entry->abandon(__FUNCTION__); // may delete entry
        ++removed;

        if (memoryCacheHasSpaceFor(pagesRequired))
            break;
    }
    // TODO: Move to RemovalPolicyWalker::Done() that has more/better details.
    debugs(20, 3, "removed " << removed << " out of " << hot_obj_count << " memory-cached entries");
    walker->Done(walker);
    memoryPagesDebt_ = 0;
}

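
// Arithmetic note (illustrative): pagesRequired rounds the byte count up to
// whole store pages. Assuming the usual 4 KB SM_PAGE_SIZE, a request for
// 10,000 bytes becomes (10000 + 4095) / 4096 = 3 pages. The purge walk above
// is also throttled via lastWalk to at most one pass per second, so a burst
// of freeMemorySpace() calls within the same second triggers at most one
// store_table scan.
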
// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
Store::Controller::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}

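
// Worked example (illustrative numbers): with cache_mem set to 256 MB and
// maximum_object_size_in_memory at its usual 512 KB default, ramLimit is
// 512 KB. An entry with 100 KB loaded but a 2 MB expected reply has
// ramSize = max(100 KB, 2 MB) = 2 MB and is rejected; an entry whose reply
// size is unknown (expectedSize < 0) is judged by its loaded size alone.
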
void
Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (sharedMemStore)
        sharedMemStore->write(e); // leave keepInLocalMemory false
    else if (localMemStore)
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

void
Store::Controller::memoryEvictCached(StoreEntry &e)
{
    // TODO: Untangle memory caching from mem_obj.
    if (sharedMemStore)
        sharedMemStore->evictCached(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        if (!e.locked())
            e.destroyMemObject();
}

void
Store::Controller::memoryDisconnect(StoreEntry &e)
{
    if (sharedMemStore)
        sharedMemStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}

void
Store::Controller::stopSharing(StoreEntry &e)
{
    // Marking the transients entry is sufficient to prevent new readers from
    // starting to wait for `e` updates and to inform the current readers (and,
    // hence, Broadcast() recipients) about the underlying Store problems.
    if (transients && e.hasTransients())
        transients->evictCached(e);
}

void
Store::Controller::transientsCompleteWriting(StoreEntry &e)
{
    // transients->isWriter(e) is false if `e` is writing to its second store
    // after finishing writing to its first store: At the end of the first swap
    // out, the transients writer becomes a reader and (XXX) we never switch
    // back to writing, even if we start swapping out again (to another store).
    if (transients && e.hasTransients() && transients->isWriter(e))
        transients->completeWriting(e);
}

int
Store::Controller::transientReaders(const StoreEntry &e) const
{
    return (transients && e.hasTransients()) ?
           transients->readers(e) : 0;
}

void
Store::Controller::transientsDisconnect(StoreEntry &e)
{
    if (transients)
        transients->disconnect(e);
}

void
Store::Controller::transientsClearCollapsingRequirement(StoreEntry &e)
{
    if (transients)
        transients->clearCollapsingRequirement(e);
}

void
Store::Controller::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (sharedMemStore) {
        // leave keepInLocalMemory false; sharedMemStore maintains its own cache
    } else if (localMemStore) {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            memoryCacheHasSpaceFor(memoryPagesDebt_);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereferenceIdle(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // formerly known as "WARNING: found KEY_PRIVATE"
    assert(!EBIT_TEST(e.flags, KEY_PRIVATE));

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
        return;
    }

    // We know the in-memory data will be gone. Get rid of the entire entry if
    // it has nothing worth preserving on disk either.
    if (!e.swappedOut()) {
        e.release(); // deletes e
        return;
    }

    memoryEvictCached(e); // may already be gone
    // and keep the entry in store_table for its on-disk data
}

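
// Outcome summary (descriptive only): an idle entry ends up in exactly one of
// four states -- destroyed outright (no Store wants it in store_table), kept
// fully in local memory (non-shared cache with room to spare), released
// because it is neither memory-cached nor fully swapped out, or stripped of
// its RAM copy while staying indexed for its on-disk data.
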
void
Store::Controller::updateOnNotModified(StoreEntry *old, StoreEntry &e304)
{
    Must(old);
    Must(old->mem_obj);
    Must(e304.mem_obj);

    // updateOnNotModified() may be called many times for the same old entry.
    // e304.mem_obj->appliedUpdates value distinguishes two cases:
    //   false: Independent store clients revalidating the same old StoreEntry.
    //          Each such update uses its own e304. The old StoreEntry
    //          accumulates such independent updates.
    //   true: Store clients feeding off the same 304 response. Each such update
    //         uses the same e304. For timestamps correctness and performance
    //         sake, it is best to detect and skip such repeated update calls.
    if (e304.mem_obj->appliedUpdates) {
        debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
        return;
    }
    e304.mem_obj->appliedUpdates = true;

    if (!old->updateOnNotModified(e304)) {
        debugs(20, 5, "updated nothing in " << *old << " with " << e304);
        return;
    }

    if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
        sharedMemStore->updateHeaders(old);

    if (old->swap_dirn > -1)
        swapDir->updateHeaders(old);
}

bool
Store::Controller::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                   const HttpRequestMethod &)
{
    const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
    // set the flag now so that it gets copied into the Transients entry
    e->setCollapsingRequirement(true);
    if (e->makePublic(keyScope)) { // this is needed for both local and SMP collapsing
        debugs(20, 3, "may " << (transients && e->hasTransients() ?
                                 "SMP-" : "locally-") << "collapse " << *e);
        return true;
    }
    // paranoid cleanup; the flag is meaningless for private entries
    e->setCollapsingRequirement(false);
    return false;
}

void
Store::Controller::addReading(StoreEntry *e, const cache_key *key)
{
    if (transients)
        transients->monitorIo(e, key, Store::ioReading);
    e->hashInsert(key);
}

void
Store::Controller::addWriting(StoreEntry *e, const cache_key *key)
{
    assert(e);
    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return; // constant memory-resident entries do not need transients

    if (transients)
        transients->monitorIo(e, key, Store::ioWriting);
    // else: non-SMP configurations do not need transients
}

void
Store::Controller::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }

    if (!collapsed->locked()) {
        debugs(20, 3, "skipping (and may destroy) unlocked " << *collapsed);
        handleIdleEntry(*collapsed);
        return;
    }

    assert(collapsed->mem_obj);

    if (EBIT_TEST(collapsed->flags, ENTRY_ABORTED)) {
        debugs(20, 3, "skipping already aborted " << *collapsed);
        return;
    }

    debugs(20, 7, "syncing " << *collapsed);

    Transients::EntryStatus entryStatus;
    transients->status(*collapsed, entryStatus);

    if (!entryStatus.collapsed) {
        debugs(20, 5, "removing collapsing requirement for " << *collapsed << " since remote writer probably got headers");
        collapsed->setCollapsingRequirement(false);
    }

    if (entryStatus.waitingToBeFreed) {
        debugs(20, 3, "will release " << *collapsed << " due to waitingToBeFreed");
        collapsed->release(true); // may already be marked
    }

    if (transients->isWriter(*collapsed))
        return; // readers can only change our waitingToBeFreed flag

    assert(transients->isReader(*collapsed));

    if (entryStatus.abortedByWriter) {
        debugs(20, 3, "aborting " << *collapsed << " because its writer has aborted");
        collapsed->abort();
        return;
    }

    if (entryStatus.collapsed && !collapsed->hittingRequiresCollapsing()) {
        debugs(20, 3, "aborting " << *collapsed << " due to writer/reader collapsing state mismatch");
        collapsed->abort();
        return;
    }

    bool found = false;
    bool inSync = false;
    if (sharedMemStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (sharedMemStore && collapsed->hasMemStore()) {
        found = true;
        inSync = sharedMemStore->updateAnchored(*collapsed);
        // TODO: handle entries attached to both memory and disk
    } else if (swapDir && collapsed->hasDisk()) {
        found = true;
        inSync = swapDir->updateAnchored(*collapsed);
    } else {
        found = anchorToCache(*collapsed, inSync);
    }

    if (entryStatus.waitingToBeFreed && !found) {
        debugs(20, 3, "aborting unattached " << *collapsed <<
               " because it was marked for deletion before we could attach it");
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
        return;
    }

    if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
        return;
    }

    // the entry is still not in one of the caches
    debugs(20, 7, "waiting " << *collapsed);
}

bool
Store::Controller::anchorToCache(StoreEntry &entry, bool &inSync)
{
    assert(entry.hasTransients());
    assert(transientsReader(entry));

    debugs(20, 7, "anchoring " << entry);

    bool found = false;
    if (sharedMemStore)
        found = sharedMemStore->anchorToCache(entry, inSync);
    if (!found && swapDir)
        found = swapDir->anchorToCache(entry, inSync);

    if (found) {
        if (inSync)
            debugs(20, 7, "anchored " << entry);
        else
            debugs(20, 5, "failed to anchor " << entry);
    } else {
        debugs(20, 7, "skipping not yet cached " << entry);
    }

    return found;
}

bool
Store::Controller::SmpAware()
{
    return MemStore::Enabled() || Disks::SmpAware();
}

void
Store::Controller::checkTransients(const StoreEntry &e) const
{
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;
    assert(!transients || e.hasTransients());
}

namespace Store {
static RefCount<Controller> TheRoot;
}

Store::Controller &
Store::Root()
{
    assert(TheRoot);
    return *TheRoot;
}

void
Store::Init(Controller *root)
{
    TheRoot = root ? root : new Controller;
}

void
Store::FreeMemory()
{
    TheRoot = nullptr;
}

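// Usage sketch (illustrative only; the real wiring lives in Squid's startup
// and shutdown paths, and someKey stands for any cache_key computed by the
// caller): the module-level functions above frame the singleton's lifetime.
//
//     Store::Init();                  // install the default Controller root
//     Store::Root().configure();      // apply squid.conf limits
//     Store::Root().init();           // bring up memory/disk/transients stores
//     ...
//     if (StoreEntry *e = Store::Root().find(someKey)) {
//         ...                         // use the cached or in-transit entry
//     }
//     ...
//     Store::FreeMemory();            // drop the root (shutdown and unit tests)
//
// Unit tests may pass their own Controller to Store::Init() instead of
// relying on the default root.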