Controller.cc
Go to the documentation of this file.
1 /*
2  * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 20 Store Controller */
10 
11 #include "squid.h"
12 #include "mem_node.h"
13 #include "MemStore.h"
14 #include "profiler/Profiler.h"
15 #include "SquidConfig.h"
16 #include "SquidMath.h"
17 #include "store/Controller.h"
18 #include "store/Disks.h"
19 #include "store/LocalSearch.h"
20 #include "tools.h"
21 #include "Transients.h"
22 
23 #if HAVE_SYS_WAIT_H
24 #include <sys/wait.h>
25 #endif
26 
27 /*
28  * store_dirs_rebuilding is initialized to _1_ as a hack so that
29  * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
30  * cache_dirs have been read. For example, without this hack, Squid
31  * will try to write clean log files if -kparse fails (because it
32  * calls fatal()).
33  */
35 
37  swapDir(new Disks),
38  sharedMemStore(nullptr),
39  localMemStore(false),
40  transients(NULL)
41 {
43 }
44 
46 {
47  delete sharedMemStore;
48  delete transients;
49  delete swapDir;
50 
51  if (store_table) {
54  store_table = nullptr;
55  }
56 }
57 
58 void
60 {
61  if (IamWorkerProcess()) {
62  if (MemStore::Enabled()) {
63  sharedMemStore = new MemStore;
64  sharedMemStore->init();
65  } else if (Config.memMaxSize > 0) {
66  localMemStore = true;
67  }
68  }
69 
70  swapDir->init();
71 
73  transients = new Transients;
74  transients->init();
75  }
76 }
77 
78 void
80 {
81  swapDir->create();
82 
83 #if !_SQUID_WINDOWS_
84  pid_t pid;
85  do {
86  PidStatus status;
87  pid = WaitForAnyPid(status, WNOHANG);
88  } while (pid > 0 || (pid < 0 && errno == EINTR));
89 #endif
90 }
91 
92 void
94 {
95  static time_t last_warn_time = 0;
96 
97  PROF_start(storeMaintainSwapSpace);
98  swapDir->maintain();
99 
100  /* this should be emitted by the oversize dir, not globally */
101 
102  if (Root().currentSize() > Store::Root().maxSize()) {
103  if (squid_curtime - last_warn_time > 10) {
104  debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
105  << Store::Root().currentSize() / 1024.0 << " KB > "
106  << (Store::Root().maxSize() >> 10) << " KB");
107  last_warn_time = squid_curtime;
108  }
109  }
110 
111  PROF_stop(storeMaintainSwapSpace);
112 }
113 
114 void
116 {
117  if (sharedMemStore)
118  sharedMemStore->getStats(stats);
119  else {
120  // move this code to a non-shared memory cache class when we have it
121  stats.mem.shared = false;
122  stats.mem.capacity = Config.memMaxSize;
123  stats.mem.size = mem_node::StoreMemSize();
124  if (localMemStore) {
125  // XXX: also count internal/in-transit objects
126  stats.mem.count = hot_obj_count;
127  } else {
128  // XXX: count internal/in-transit objects instead
129  stats.mem.count = hot_obj_count;
130  }
131  }
132 
133  swapDir->getStats(stats);
134 
135  // low-level info not specific to memory or disk cache
136  stats.store_entry_count = StoreEntry::inUseCount();
137  stats.mem_object_count = MemObject::inUseCount();
138 }
139 
140 void
142 {
143  storeAppendPrintf(&output, "Store Directory Statistics:\n");
144  storeAppendPrintf(&output, "Store Entries : %lu\n",
145  (unsigned long int)StoreEntry::inUseCount());
146  storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
147  maxSize() >> 10);
148  storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
149  currentSize() / 1024.0);
150  storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
151  Math::doublePercent(currentSize(), maxSize()),
152  Math::doublePercent((maxSize() - currentSize()), maxSize()));
153 
154  if (sharedMemStore)
155  sharedMemStore->stat(output);
156 
157  /* now the swapDir */
158  swapDir->stat(output);
159 }
160 
161 /* if needed, this could be taught to cache the result */
162 uint64_t
164 {
165  /* TODO: include memory cache ? */
166  return swapDir->maxSize();
167 }
168 
169 uint64_t
171 {
172  /* TODO: include memory cache ? */
173  return swapDir->minSize();
174 }
175 
176 uint64_t
178 {
179  /* TODO: include memory cache ? */
180  return swapDir->currentSize();
181 }
182 
183 uint64_t
185 {
186  /* TODO: include memory cache ? */
187  return swapDir->currentCount();
188 }
189 
190 int64_t
192 {
193  /* TODO: include memory cache ? */
194  return swapDir->maxObjectSize();
195 }
196 
197 void
199 {
200  swapDir->updateLimits();
201 
202  store_swap_high = (long) (((float) maxSize() *
203  (float) Config.Swap.highWaterMark) / (float) 100);
204  store_swap_low = (long) (((float) maxSize() *
205  (float) Config.Swap.lowWaterMark) / (float) 100);
207 
208  // TODO: move this into a memory cache class when we have one
209  const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
210  const int64_t disksMax = swapDir ? swapDir->maxObjectSize() : 0;
211  store_maxobjsize = std::max(disksMax, memMax);
212 }
213 
214 StoreSearch *
216 {
217  // this is the only kind of search we currently support
218  return NewLocalSearch();
219 }
220 
221 void
223 {
224  if (sharedMemStore)
225  sharedMemStore->sync();
226  swapDir->sync();
227 }
228 
229 /*
230  * handle callbacks all available fs'es
231  */
232 int
234 {
235  /* This will likely double count. That's ok. */
236  PROF_start(storeDirCallback);
237 
238  /* mem cache callbacks ? */
239  int result = swapDir->callback();
240 
241  PROF_stop(storeDirCallback);
242 
243  return result;
244 }
245 
247 void
249 {
250  // special entries do not belong to any specific Store, but are IN_MEMORY
251  if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
252  return;
253 
254  /* Notify the fs that we're referencing this object again */
255 
256  if (e.hasDisk())
257  swapDir->reference(e);
258 
259  // Notify the memory cache that we're referencing this object again
260  if (sharedMemStore && e.mem_status == IN_MEMORY)
261  sharedMemStore->reference(e);
262 
263  // TODO: move this code to a non-shared memory cache class when we have it
264  if (e.mem_obj) {
265  if (mem_policy->Referenced)
267  }
268 }
269 
272 bool
274 {
275  // special entries do not belong to any specific Store, but are IN_MEMORY
276  if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
277  return true;
278 
279  bool keepInStoreTable = false; // keep only if somebody needs it there
280 
281  // Notify the fs that we are not referencing this object any more. This
282  // should be done even if we overwrite keepInStoreTable afterwards.
283 
284  if (e.hasDisk())
285  keepInStoreTable = swapDir->dereference(e) || keepInStoreTable;
286 
287  // Notify the memory cache that we're not referencing this object any more
288  if (sharedMemStore && e.mem_status == IN_MEMORY)
289  keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;
290 
291  // TODO: move this code to a non-shared memory cache class when we have it
292  if (e.mem_obj) {
295  // non-shared memory cache relies on store_table
296  if (localMemStore)
297  keepInStoreTable = wantsLocalMemory || keepInStoreTable;
298  }
299 
300  if (e.hittingRequiresCollapsing()) {
301  // If we were writing this now-locally-idle entry, then we did not
302  // finish and should now destroy an incomplete entry. Otherwise, do not
303  // leave this idle StoreEntry behind because handleIMSReply() lacks
304  // freshness checks when hitting a collapsed revalidation entry.
305  keepInStoreTable = false; // may overrule fs decisions made above
306  }
307 
308  return keepInStoreTable;
309 }
310 
311 bool
313 {
314  // assuming a public key, checking Transients should cover all cases.
315  return transients && transients->markedForDeletion(key);
316 }
317 
318 bool
320 {
321  // The opposite check order could miss a reader that has arrived after the
322  // !readers() and before the markedForDeletion() check.
323  return markedForDeletion(reinterpret_cast<const cache_key*>(e.key)) &&
324  transients && !transients->readers(e);
325 }
326 
327 bool
329 {
330  return swapDir->hasReadableEntry(e);
331 }
332 
334 void
336 {
337  checkTransients(entry);
338 
339  // The "hittingRequiresCollapsing() has an active writer" checks below
340  // protect callers from getting stuck and/or from using a stale revalidation
341  // reply. However, these protections are not reliable because the writer may
342  // disappear at any time and/or without a trace. Collapsing adds risks...
343  if (entry.hittingRequiresCollapsing()) {
344  if (entry.hasTransients()) {
345  // Too late to check here because the writer may be gone by now, but
346  // Transients do check when they setCollapsingRequirement().
347  } else {
348  // a local writer must hold a lock on its writable entry
349  if (!(entry.locked() && entry.isAccepting()))
350  throw TextException("no local writer", Here());
351  }
352  }
353 }
354 
355 StoreEntry *
357 {
358  if (const auto entry = peek(key)) {
359  try {
360  if (!entry->key)
361  allowSharing(*entry, key);
362  checkFoundCandidate(*entry);
363  entry->touch();
364  referenceBusy(*entry);
365  return entry;
366  } catch (const std::exception &ex) {
367  debugs(20, 2, "failed with " << *entry << ": " << ex.what());
368  entry->release();
369  // fall through
370  }
371  }
372  return NULL;
373 }
374 
376 void
378 {
379  // TODO: refactor to throw on anchorToCache() inSync errors!
380 
381  // anchorToCache() below and many find() callers expect a registered entry
382  addReading(&entry, key);
383 
384  if (entry.hasTransients()) {
385  // store hadWriter before computing `found`; \see Transients::get()
386  const auto hadWriter = transients->hasWriter(entry);
387  bool inSync = false;
388  const bool found = anchorToCache(entry, inSync);
389  if (found && !inSync)
390  throw TexcHere("cannot sync");
391  if (!found) {
392  // !found should imply hittingRequiresCollapsing() regardless of writer presence
393  if (!entry.hittingRequiresCollapsing()) {
394  debugs(20, DBG_IMPORTANT, "BUG: missing ENTRY_REQUIRES_COLLAPSING for " << entry);
395  throw TextException("transients entry missing ENTRY_REQUIRES_COLLAPSING", Here());
396  }
397 
398  if (!hadWriter) {
399  // prevent others from falling into the same trap
400  throw TextException("unattached transients entry missing writer", Here());
401  }
402  }
403  }
404 }
405 
406 StoreEntry *
408 {
409  // We could check for mem_obj presence (and more), moving and merging some
410  // of the duplicated neighborsUdpAck() and neighborsHtcpReply() code here,
411  // but that would mean polluting Store with HTCP/ICP code. Instead, we
412  // should encapsulate callback-related data in a protocol-neutral MemObject
413  // member or use an HTCP/ICP-specific index rather than store_table.
414 
415  // cannot reuse peekAtLocal() because HTCP/ICP callbacks may use private keys
416  return static_cast<StoreEntry*>(hash_lookup(store_table, key));
417 }
418 
422 StoreEntry *
424 {
425  if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
426  // callers must only search for public entries
427  assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
428  assert(e->publicKey());
429  checkTransients(*e);
430 
431  // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
432  // because their backing store slot may be gone already.
433  return e;
434  }
435  return nullptr;
436 }
437 
438 StoreEntry *
440 {
441  debugs(20, 3, storeKeyText(key));
442 
443  if (markedForDeletion(key)) {
444  debugs(20, 3, "ignoring marked in-transit " << storeKeyText(key));
445  return nullptr;
446  }
447 
448  if (StoreEntry *e = peekAtLocal(key)) {
449  debugs(20, 3, "got local in-transit entry: " << *e);
450  return e;
451  }
452 
453  // Must search transients before caches because we must sync those we find.
454  if (transients) {
455  if (StoreEntry *e = transients->get(key)) {
456  debugs(20, 3, "got shared in-transit entry: " << *e);
457  return e;
458  }
459  }
460 
461  if (sharedMemStore) {
462  if (StoreEntry *e = sharedMemStore->get(key)) {
463  debugs(20, 3, HERE << "got mem-cached entry: " << *e);
464  return e;
465  }
466  }
467 
468  if (swapDir) {
469  if (StoreEntry *e = swapDir->get(key)) {
470  debugs(20, 3, "got disk-cached entry: " << *e);
471  return e;
472  }
473  }
474 
475  debugs(20, 4, "cannot locate " << storeKeyText(key));
476  return nullptr;
477 }
478 
479 bool
481 {
482  return transients && e.hasTransients() && transients->isReader(e);
483 }
484 
485 bool
487 {
488  return transients && e.hasTransients() && transients->isWriter(e);
489 }
490 
491 int64_t
493 {
494  return swapDir ? swapDir->accumulateMore(entry) : 0;
495  // The memory cache should not influence for-swapout accumulation decision.
496 }
497 
498 // Must be called from StoreEntry::release() or releaseRequest() because
499 // those methods currently manage local indexing of StoreEntry objects.
500 // TODO: Replace StoreEntry::release*() with Root().evictCached().
501 void
503 {
504  debugs(20, 7, e);
505  if (transients)
506  transients->evictCached(e);
507  memoryEvictCached(e);
508  if (swapDir)
509  swapDir->evictCached(e);
510 }
511 
512 void
514 {
515  debugs(20, 7, storeKeyText(key));
516 
517  if (StoreEntry *entry = peekAtLocal(key)) {
518  debugs(20, 5, "marking local in-transit " << *entry);
519  entry->release(true);
520  return;
521  }
522 
523  if (sharedMemStore)
524  sharedMemStore->evictIfFound(key);
525  if (swapDir)
526  swapDir->evictIfFound(key);
527  if (transients)
528  transients->evictIfFound(key);
529 }
530 
/// Reports whether the (local, non-shared) memory cache may grow by
/// pagesRequired more pages without exceeding the store_pages_max limit.
/// Read-only: makes no attempt to free space (see freeMemorySpace()).
532 bool
533 Store::Controller::memoryCacheHasSpaceFor(const int pagesRequired) const
534 {
535  // XXX: We count mem_nodes but may free shared memory pages instead.
536  const auto fits = mem_node::InUseCount() + pagesRequired <= store_pages_max;
537  debugs(20, 7, fits << ": " << mem_node::InUseCount() << '+' << pagesRequired << '?' << store_pages_max);
538  return fits;
539 }
540 
/// Tries to make room for bytesRequired more bytes in the memory cache by
/// purging idle entries via the removal policy walker. Best-effort: returns
/// without freeing anything if space already fits, or if a purge walk already
/// ran during the current second (rate limit). Sets memoryPagesDebt_ for the
/// duration of the walk so abandon()/handleIdleEntry() see the shortage.
541 void
542 Store::Controller::freeMemorySpace(const int bytesRequired)
543 {
// round the byte count up to whole SM_PAGE_SIZE pages
544  const auto pagesRequired = (bytesRequired + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
545 
546  if (memoryCacheHasSpaceFor(pagesRequired))
547  return;
548 
549  // XXX: When store_pages_max is smaller than pagesRequired, we should not
550  // look for more space (but we do because we want to abandon idle entries?).
551 
552  // limit our performance impact to one walk per second
553  static time_t lastWalk = 0;
554  if (lastWalk == squid_curtime)
555  return;
556  lastWalk = squid_curtime;
557 
558  debugs(20, 2, "need " << pagesRequired << " pages");
559 
560  // let abandon()/handleIdleEntry() know about the impending memory shortage
561  memoryPagesDebt_ = pagesRequired;
562 
563  // XXX: SMP-unaware: Walkers should iterate memory cache, not store_table.
564  // XXX: Limit iterations by time, not arbitrary count.
565  const auto walker = mem_policy->PurgeInit(mem_policy, 100000);
566  int removed = 0;
567  while (const auto entry = walker->Next(walker)) {
568  // Abandoned memory cache entries are purged during memory shortage.
569  entry->abandon(__FUNCTION__); // may delete entry
570  ++removed;
571 
// stop purging as soon as enough pages have been reclaimed
572  if (memoryCacheHasSpaceFor(pagesRequired))
573  break;
574  }
575  // TODO: Move to RemovalPolicyWalker::Done() that has more/better details.
576  debugs(20, 3, "removed " << removed << " out of " << hot_obj_count << " memory-cached entries");
577  walker->Done(walker);
// clear the debt marker now that the (best-effort) walk has finished
578  memoryPagesDebt_ = 0;
579 }
580 
581 // move this into [non-shared] memory cache class when we have one
583 bool
585 {
586  if (!e.memoryCachable())
587  return false;
588 
589  // does the current and expected size obey memory caching limits?
590  assert(e.mem_obj);
591  const int64_t loadedSize = e.mem_obj->endOffset();
592  const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
593  const int64_t ramSize = max(loadedSize, expectedSize);
594  const int64_t ramLimit = min(
595  static_cast<int64_t>(Config.memMaxSize),
596  static_cast<int64_t>(Config.Store.maxInMemObjSize));
597  return ramSize <= ramLimit;
598 }
599 
/// Disposes of in-RAM entry data that is no longer needed: hands the entry to
/// the shared memory cache when one is configured, otherwise keeps it only if
/// the local (non-shared) memory cache wants it; anything not kept is trimmed
/// from RAM (preserving swappable data when preserveSwappable is set).
600 void
601 Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
602 {
603  bool keepInLocalMemory = false;
604  if (sharedMemStore)
605  sharedMemStore->write(e); // leave keepInLocalMemory false
606  else if (localMemStore)
607  keepInLocalMemory = keepForLocalMemoryCache(e);
608 
609  debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);
610 
611  if (!keepInLocalMemory)
612  e.trimMemory(preserveSwappable);
613 }
614 
617 void
619 {
620  // TODO: Untangle memory caching from mem_obj.
621  if (sharedMemStore)
622  sharedMemStore->evictCached(e);
623  else // TODO: move into [non-shared] memory cache class when we have one
624  if (!e.locked())
625  e.destroyMemObject();
626 }
627 
628 void
630 {
631  if (sharedMemStore)
632  sharedMemStore->disconnect(e);
633  // else nothing to do for non-shared memory cache
634 }
635 
636 void
638 {
639  // Marking the transients entry is sufficient to prevent new readers from
640  // starting to wait for `e` updates and to inform the current readers (and,
641  // hence, Broadcast() recipients) about the underlying Store problems.
642  if (transients && e.hasTransients())
643  transients->evictCached(e);
644 }
645 
646 void
648 {
649  // transients->isWriter(e) is false if `e` is writing to its second store
650  // after finishing writing to its first store: At the end of the first swap
651  // out, the transients writer becomes a reader and (XXX) we never switch
652  // back to writing, even if we start swapping out again (to another store).
653  if (transients && e.hasTransients() && transients->isWriter(e))
654  transients->completeWriting(e);
655 }
656 
657 int
659 {
660  return (transients && e.hasTransients()) ?
661  transients->readers(e) : 0;
662 }
663 
664 void
666 {
667  if (transients)
668  transients->disconnect(e);
669 }
670 
671 void
673 {
674  if (transients)
675  transients->clearCollapsingRequirement(e);
676 }
677 
678 void
680 {
681  bool keepInLocalMemory = false;
682 
683  if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
684  // Icons (and cache digests?) should stay in store_table until we
685  // have a dedicated storage for them (that would not purge them).
686  // They are not managed [well] by any specific Store handled below.
687  keepInLocalMemory = true;
688  } else if (sharedMemStore) {
689  // leave keepInLocalMemory false; sharedMemStore maintains its own cache
690  } else if (localMemStore) {
691  keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
692  // the local memory cache is not overflowing
693  memoryCacheHasSpaceFor(memoryPagesDebt_);
694  }
695 
696  // An idle, unlocked entry that only belongs to a SwapDir which controls
697  // its own index, should not stay in the global store_table.
698  if (!dereferenceIdle(e, keepInLocalMemory)) {
699  debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
700  destroyStoreEntry(static_cast<hash_link*>(&e));
701  return;
702  }
703 
704  debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);
705 
706  // TODO: move this into [non-shared] memory cache class when we have one
707  if (keepInLocalMemory) {
709  e.mem_obj->unlinkRequest();
710  return;
711  }
712 
713  // We know the in-memory data will be gone. Get rid of the entire entry if
714  // it has nothing worth preserving on disk either.
715  if (!e.swappedOut()) {
716  e.release(); // deletes e
717  return;
718  }
719 
720  memoryEvictCached(e); // may already be gone
721  // and keep the entry in store_table for its on-disk data
722 }
723 
724 void
726 {
727  Must(old);
728  Must(old->mem_obj);
729  Must(e304.mem_obj);
730 
731  // updateOnNotModified() may be called many times for the same old entry.
732  // e304.mem_obj->appliedUpdates value distinguishes two cases:
733  // false: Independent store clients revalidating the same old StoreEntry.
734  // Each such update uses its own e304. The old StoreEntry
735  // accumulates such independent updates.
736  // true: Store clients feeding off the same 304 response. Each such update
737  // uses the same e304. For timestamps correctness and performance
738  // sake, it is best to detect and skip such repeated update calls.
739  if (e304.mem_obj->appliedUpdates) {
740  debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
741  return;
742  }
743  e304.mem_obj->appliedUpdates = true;
744 
745  if (!old->updateOnNotModified(e304)) {
746  debugs(20, 5, "updated nothing in " << *old << " with " << e304);
747  return;
748  }
749 
750  if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
751  sharedMemStore->updateHeaders(old);
752 
753  if (old->swap_dirn > -1)
754  swapDir->updateHeaders(old);
755 }
756 
757 bool
759  const HttpRequestMethod &reqMethod)
760 {
761  const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
762  // set the flag now so that it gets copied into the Transients entry
763  e->setCollapsingRequirement(true);
764  if (e->makePublic(keyScope)) { // this is needed for both local and SMP collapsing
765  debugs(20, 3, "may " << (transients && e->hasTransients() ?
766  "SMP-" : "locally-") << "collapse " << *e);
767  return true;
768  }
769  // paranoid cleanup; the flag is meaningless for private entries
770  e->setCollapsingRequirement(false);
771  return false;
772 }
773 
774 void
776 {
777  if (transients)
778  transients->monitorIo(e, key, Store::ioReading);
779  e->hashInsert(key);
780 }
781 
782 void
784 {
785  assert(e);
786  if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
787  return; // constant memory-resident entries do not need transients
788 
789  if (transients)
790  transients->monitorIo(e, key, Store::ioWriting);
791  // else: non-SMP configurations do not need transients
792 }
793 
794 void
796 {
797  assert(transients);
798 
799  StoreEntry *collapsed = transients->findCollapsed(xitIndex);
800  if (!collapsed) { // the entry is no longer active, ignore update
801  debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
802  return;
803  }
804 
805  if (!collapsed->locked()) {
806  debugs(20, 3, "skipping (and may destroy) unlocked " << *collapsed);
807  handleIdleEntry(*collapsed);
808  return;
809  }
810 
811  assert(collapsed->mem_obj);
812 
813  if (EBIT_TEST(collapsed->flags, ENTRY_ABORTED)) {
814  debugs(20, 3, "skipping already aborted " << *collapsed);
815  return;
816  }
817 
818  debugs(20, 7, "syncing " << *collapsed);
819 
820  Transients::EntryStatus entryStatus;
821  transients->status(*collapsed, entryStatus);
822 
823  if (!entryStatus.collapsed) {
824  debugs(20, 5, "removing collapsing requirement for " << *collapsed << " since remote writer probably got headers");
825  collapsed->setCollapsingRequirement(false);
826  }
827 
828  if (entryStatus.waitingToBeFreed) {
829  debugs(20, 3, "will release " << *collapsed << " due to waitingToBeFreed");
830  collapsed->release(true); // may already be marked
831  }
832 
833  if (transients->isWriter(*collapsed))
834  return; // readers can only change our waitingToBeFreed flag
835 
836  assert(transients->isReader(*collapsed));
837 
838  if (entryStatus.abortedByWriter) {
839  debugs(20, 3, "aborting " << *collapsed << " because its writer has aborted");
840  collapsed->abort();
841  return;
842  }
843 
844  if (entryStatus.collapsed && !collapsed->hittingRequiresCollapsing()) {
845  debugs(20, 3, "aborting " << *collapsed << " due to writer/reader collapsing state mismatch");
846  collapsed->abort();
847  return;
848  }
849 
850  bool found = false;
851  bool inSync = false;
852  if (sharedMemStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
853  found = true;
854  inSync = true;
855  debugs(20, 7, "fully mem-loaded " << *collapsed);
856  } else if (sharedMemStore && collapsed->hasMemStore()) {
857  found = true;
858  inSync = sharedMemStore->updateAnchored(*collapsed);
859  // TODO: handle entries attached to both memory and disk
860  } else if (swapDir && collapsed->hasDisk()) {
861  found = true;
862  inSync = swapDir->updateAnchored(*collapsed);
863  } else {
864  found = anchorToCache(*collapsed, inSync);
865  }
866 
867  if (entryStatus.waitingToBeFreed && !found) {
868  debugs(20, 3, "aborting unattached " << *collapsed <<
869  " because it was marked for deletion before we could attach it");
870  collapsed->abort();
871  return;
872  }
873 
874  if (inSync) {
875  debugs(20, 5, "synced " << *collapsed);
876  collapsed->invokeHandlers();
877  return;
878  }
879 
880  if (found) { // unrecoverable problem syncing this entry
881  debugs(20, 3, "aborting unsyncable " << *collapsed);
882  collapsed->abort();
883  return;
884  }
885 
886  // the entry is still not in one of the caches
887  debugs(20, 7, "waiting " << *collapsed);
888 }
889 
893 bool
895 {
896  assert(entry.hasTransients());
897  assert(transientsReader(entry));
898 
899  debugs(20, 7, "anchoring " << entry);
900 
901  bool found = false;
902  if (sharedMemStore)
903  found = sharedMemStore->anchorToCache(entry, inSync);
904  if (!found && swapDir)
905  found = swapDir->anchorToCache(entry, inSync);
906 
907  if (found) {
908  if (inSync)
909  debugs(20, 7, "anchored " << entry);
910  else
911  debugs(20, 5, "failed to anchor " << entry);
912  } else {
913  debugs(20, 7, "skipping not yet cached " << entry);
914  }
915 
916  return found;
917 }
918 
919 bool
921 {
922  return MemStore::Enabled() || Disks::SmpAware();
923 }
924 
925 void
927 {
928  if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
929  return;
930  assert(!transients || e.hasTransients());
931 }
932 
933 namespace Store {
935 }
936 
939 {
940  assert(TheRoot);
941  return *TheRoot;
942 }
943 
944 void
946 {
947  TheRoot = root ? root : new Controller;
948 }
949 
950 void
952 {
953  TheRoot = nullptr;
954 }
955 
int transientReaders(const StoreEntry &) const
number of the transient entry readers some time ago
Definition: Controller.cc:658
static size_t StoreMemSize()
Definition: mem_node.cc:61
void memoryDisconnect(StoreEntry &)
disassociates the entry from the memory cache, preserving cached data
Definition: Controller.cc:629
class Ping::pingStats_ stats
virtual uint64_t minSize() const override
the minimum size the store will shrink to via normal housekeeping
Definition: Controller.cc:170
#define Here()
source code location of the caller
Definition: Here.h:15
#define SM_PAGE_SIZE
Definition: defines.h:102
void memoryOut(StoreEntry &, const bool preserveSwappable)
called to get rid of no longer needed entry data in RAM, if any
Definition: Controller.cc:601
bool updateOnNotModified(const StoreEntry &e304)
Definition: store.cc:1514
struct SquidConfig::@96 Swap
virtual void create() override
create system resources needed for this store to operate in the future
Definition: Controller.cc:79
unsigned char cache_key
Store key.
Definition: forward.h:29
static bool Enabled()
whether Squid is correctly configured to use a shared memory cache
Definition: MemStore.h:68
bool makePublic(const KeyScope keyScope=ksDefault)
Definition: store.cc:165
bool hasReadableDiskEntry(const StoreEntry &) const
whether there is a disk entry with e.key
Definition: Controller.cc:328
virtual void sync() override
prepare for shutdown
Definition: Controller.cc:222
MemObject * mem_obj
Definition: Store.h:213
StoreEntry * peekAtLocal(const cache_key *)
Definition: Controller.cc:423
bool waitingToBeFreed
whether the entry was marked for deletion
Definition: Transients.h:34
size_t memMaxSize
Definition: SquidConfig.h:88
@ ksRevalidation
Definition: store_key_md5.h:20
static bool SmpAware()
whether there are any SMP-aware storages
Definition: Controller.cc:920
void addReading(StoreEntry *, const cache_key *)
Definition: Controller.cc:775
void storeAppendPrintf(StoreEntry *e, const char *fmt,...)
Definition: store.cc:901
void FreeMemory()
undo Init()
Definition: Controller.cc:951
void checkTransients(const StoreEntry &) const
Definition: Controller.cc:926
MemCache memCache
current [shared] memory caching state for the entry
Definition: MemObject.h:186
static bool SmpAware()
whether any disk cache is SMP-aware
Definition: Disks.cc:548
@ KEY_PRIVATE
Definition: enums.h:102
virtual void init() override
Definition: Controller.cc:59
RemovalPurgeWalker *(* PurgeInit)(RemovalPolicy *policy, int max_scan)
Definition: RemovalPolicy.h:51
int64_t expectedReplySize() const
Definition: MemObject.cc:259
void referenceBusy(StoreEntry &e)
update reference counters of the recently touched entry
Definition: Controller.cc:248
const A & max(A const &lhs, A const &rhs)
bool transientsReader(const StoreEntry &) const
whether the entry is in "reading from Transients" I/O state
Definition: Controller.cc:480
#define PRIu64
Definition: types.h:120
virtual int callback() override
called once every main loop iteration; TODO: Move to UFS code.
Definition: Controller.cc:233
bool memoryCacheHasSpaceFor(const int pagesRequired) const
whether the memory cache is allowed to store that many additional pages
Definition: Controller.cc:533
::Transients Transients
Definition: forward.h:52
SQUIDCEXTERN void hashFreeItems(hash_table *, HASHFREE *)
Definition: hash.cc:256
uint16_t flags
Definition: Store.h:224
int hot_obj_count
void memoryEvictCached(StoreEntry &)
Definition: Controller.cc:618
bool isAccepting() const
Definition: store.cc:2050
bool IamWorkerProcess()
whether the current process handles HTTP transactions and such
Definition: stub_tools.cc:49
Io io
current I/O state
Definition: MemObject.h:184
int64_t endOffset() const
Definition: MemObject.cc:235
#define DBG_CRITICAL
Definition: Debug.h:45
int store_swap_low
SQUIDCEXTERN void hashFreeMemory(hash_table *)
Definition: hash.cc:272
RemovalPolicy * mem_policy
Definition: MemObject.cc:45
#define PROF_stop(probename)
Definition: Profiler.h:63
#define DBG_IMPORTANT
Definition: Debug.h:46
void transientsCompleteWriting(StoreEntry &)
marks the entry completed for collapsed requests
Definition: Controller.cc:647
virtual void evictCached(StoreEntry &) override
Definition: Controller.cc:502
void destroyMemObject()
Definition: store.cc:398
bool allowCollapsing(StoreEntry *, const RequestFlags &, const HttpRequestMethod &)
tries to make the entry available for collapsing future requests
Definition: Controller.cc:758
int64_t accumulateMore(StoreEntry &) const
Definition: Controller.cc:492
int highWaterMark
Definition: SquidConfig.h:82
void(* Referenced)(RemovalPolicy *policy, const StoreEntry *entry, RemovalPolicyNode *node)
Definition: RemovalPolicy.h:48
bool abortedByWriter
whether the entry was aborted
Definition: Transients.h:33
#define TexcHere(msg)
legacy convenience macro; it is not difficult to type Here() now
Definition: TextException.h:55
void invokeHandlers()
static pid_t pid
Definition: IcmpSquid.cc:35
void addWriting(StoreEntry *, const cache_key *)
Definition: Controller.cc:783
shared entry metadata, used for synchronization
Definition: Transients.h:30
@ ENTRY_SPECIAL
Definition: enums.h:84
void updateLimits()
slowly calculate (and cache) hi/lo watermarks and similar limits
Definition: Controller.cc:198
virtual void init() override
Definition: Transients.cc:41
double doublePercent(const double, const double)
Definition: SquidMath.cc:25
void transientsDisconnect(StoreEntry &)
disassociates the entry from the intransit table
Definition: Controller.cc:665
StoreEntry * find(const cache_key *)
Definition: Controller.cc:356
static bool Enabled()
Can we create and initialize Transients?
Definition: Transients.h:94
#define NULL
Definition: types.h:166
#define debugs(SECTION, LEVEL, CONTENT)
Definition: Debug.h:128
@ IN_MEMORY
Definition: enums.h:36
void abort()
Definition: store.cc:1124
void stopSharing(StoreEntry &)
stop any current (and prevent any future) SMP sharing of the given entry
Definition: Controller.cc:637
bool hasDisk(const sdirno dirn=-1, const sfileno filen=-1) const
Definition: store.cc:1990
sdirno swap_dirn
Definition: Store.h:230
#define EBIT_TEST(flag, bit)
Definition: defines.h:107
void setCollapsingRequirement(const bool required)
allow or forbid collapsed requests feeding
Definition: store.cc:2074
std::ostream & HERE(std::ostream &s)
Definition: Debug.h:157
virtual void evictIfFound(const cache_key *) override
Definition: Controller.cc:513
bool hasMemStore() const
whether there is a corresponding locked shared memory table entry
Definition: Store.h:205
summary view of all disk caches (cache_dirs) combined
Definition: Disks.h:18
bool markedForDeletion(const cache_key *key) const
Definition: Controller.cc:312
static size_t inUseCount()
Definition: store.cc:197
#define assert(EX)
Definition: assert.h:19
virtual void getStats(StoreInfoStats &stats) const override
collect statistics
Definition: Controller.cc:115
FREE destroyStoreEntry
virtual void stat(StoreEntry &) const override
Definition: Controller.cc:141
size_t store_pages_max
StoreSearch * NewLocalSearch()
Definition: LocalSearch.cc:44
static size_t inUseCount()
Definition: MemObject.cc:48
void syncCollapsed(const sfileno)
Update local intransit entry after changes made by appending worker.
Definition: Controller.cc:795
hash_table * store_table
pid_t WaitForAnyPid(PidStatus &status, int flags)
Definition: tools.h:111
mem_status_t mem_status
Definition: Store.h:232
StoreSearch * search()
Definition: Controller.cc:215
static int store_dirs_rebuilding
the number of cache_dirs being rebuilt; TODO: move to Disks::Rebuilding
Definition: Controller.h:139
time_t squid_curtime
Definition: stub_time.cc:17
void transientsClearCollapsingRequirement(StoreEntry &e)
removes collapsing requirement (for future hits)
Definition: Controller.cc:672
bool anchorToCache(StoreEntry &e, bool &inSync)
Definition: Controller.cc:894
size_t maxInMemObjSize
Definition: SquidConfig.h:271
signed_int32_t sfileno
Definition: forward.h:22
int64_t store_maxobjsize
void(* Dereferenced)(RemovalPolicy *policy, const StoreEntry *entry, RemovalPolicyNode *node)
Definition: RemovalPolicy.h:49
void Init(Controller *root=nullptr)
initialize the storage module; a custom root is used by unit tests only
Definition: Controller.cc:945
int lowWaterMark
Definition: SquidConfig.h:83
virtual uint64_t currentSize() const override
current size
Definition: Controller.cc:177
void setMemStatus(mem_status_t)
Definition: store.cc:1580
static constexpr Io ioDone
Definition: MemObject.h:166
@ ksDefault
Definition: store_key_md5.h:19
virtual uint64_t maxSize() const override
Definition: Controller.cc:163
static size_t InUseCount()
Definition: mem_node.cc:55
bool appliedUpdates
Definition: MemObject.h:89
virtual void init() override
Definition: MemStore.cc:171
bool keepForLocalMemoryCache(StoreEntry &e) const
whether e should be kept in local RAM for possible future caching
Definition: Controller.cc:584
KeyScope
Definition: store_key_md5.h:18
@ ENTRY_ABORTED
Definition: enums.h:115
virtual int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
Definition: Controller.cc:191
StoreEntry * findCallbackXXX(const cache_key *)
Definition: Controller.cc:407
void updateOnNotModified(StoreEntry *old, StoreEntry &e304)
using a 304 response, update the old entry (metadata and reply headers)
Definition: Controller.cc:725
an std::runtime_error with thrower location info
Definition: TextException.h:19
int PidStatus
Definition: tools.h:94
static RefCount< Controller > TheRoot
Definition: Controller.cc:934
virtual void maintain() override
perform regular periodic maintenance; TODO: move to UFSSwapDir::Maintain
Definition: Controller.cc:93
void handleIdleEntry(StoreEntry &)
called when the entry is no longer needed by any transaction
Definition: Controller.cc:679
void unlinkRequest()
Definition: MemObject.h:55
#define Must(condition)
Like assert() but throws an exception instead of aborting the process.
Definition: TextException.h:69
StoreEntry * peek(const cache_key *)
Definition: Controller.cc:439
const char * storeKeyText(const cache_key *key)
void allowSharing(StoreEntry &, const cache_key *)
indexes and adds SMP-tracking for an ephemeral peek() result
Definition: Controller.cc:377
bool transientsWriter(const StoreEntry &) const
whether the entry is in "writing to Transients" I/O state
Definition: Controller.cc:486
bool hasTransients() const
whether there is a corresponding locked transients table entry
Definition: Store.h:203
bool collapsed
whether the entry allows collapsing
Definition: Transients.h:35
void release(const bool shareable=false)
Definition: store.cc:1196
void hashInsert(const cache_key *)
Definition: store.cc:438
bool memoryCachable()
checkCachable() and can be cached in memory
Definition: store.cc:1331
void freeMemorySpace(const int spaceRequired)
Definition: Controller.cc:542
virtual uint64_t currentCount() const override
the total number of objects stored right now
Definition: Controller.cc:184
void trimMemory(const bool preserveSwappable)
Definition: store.cc:1875
struct SquidConfig::@110 Store
void checkFoundCandidate(const StoreEntry &) const
flags problematic entries before find() commits to finalizing/returning them
Definition: Controller.cc:335
bool markedForDeletionAndAbandoned(const StoreEntry &) const
Definition: Controller.cc:319
bool hittingRequiresCollapsing() const
whether this entry can feed collapsed requests and only them
Definition: Store.h:208
int locked() const
Definition: Store.h:136
@ ioWriting
Definition: forward.h:40
RemovalPolicyNode repl
Definition: MemObject.h:196
bool swappedOut() const
whether the entire entry is now on disk (possibly marked for deletion)
Definition: Store.h:126
SQUIDCEXTERN hash_link * hash_lookup(hash_table *, const void *)
Definition: hash.cc:147
#define false
Definition: GnuRegex.c:233
int store_swap_high
@ ioReading
Definition: forward.h:40
const A & min(A const &lhs, A const &rhs)
High-level store statistics used by mgr:info action. Used inside PODs!
Definition: StoreStats.h:13
#define PROF_start(probename)
Definition: Profiler.h:62
bool dereferenceIdle(StoreEntry &, bool wantsLocalMemory)
Definition: Controller.cc:273
class SquidConfig Config
Definition: SquidConfig.cc:12
virtual ~Controller() override
Definition: Controller.cc:45
Controller & Root()
safely access controller singleton
Definition: Controller.cc:938

 

Introduction

Documentation

Support

Miscellaneous

Web Site Translations

Mirrors