MemStore.cc
/*
 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

/// Packs to shared memory, allocating new slots/pages as needed.
/// Requires an Ipc::StoreMapAnchor locked for writing.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    virtual void append(const char *aBuf, int aSize) override;
    virtual void vappendf(const char *fmt, va_list ap) override;

public:
    StoreEntry *entry; ///< the entry being updated

    /// the slot keeping the first byte of the appended content (at least)
    /// either set via constructor parameter or allocated by the first append
    Ipc::StoreMapSliceId firstSlice;

    /// the slot keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far

protected:
    void copyToShm();
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);

private:
    MemStore &store;
    const sfileno fileNo;

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};
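
// Usage sketch: updateHeadersOrThrow() below packs fresh HTTP headers
// through this writer and then reads back the total header size:
//
//   ShmWriter writer(*this, update.entry, update.fresh.fileNo);
//   update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
//   const uint64_t freshHdrSz = writer.totalWritten;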

/* ShmWriter */

ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
    entry(anEntry),
    firstSlice(aFirstSlice),
    lastSlice(firstSlice),
    totalWritten(0),
    store(aStore),
    fileNo(aFileNo),
    buf(nullptr),
    bufSize(0),
    bufWritten(0)
{
    Must(entry);
}

void
ShmWriter::append(const char *aBuf, int aBufSize)
{
    Must(!buf);
    buf = aBuf;
    bufSize = aBufSize;
    if (bufSize) {
        Must(buf);
        bufWritten = 0;
        copyToShm();
    }
    buf = nullptr;
    bufSize = 0;
    bufWritten = 0;
}

void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
    va_list apCopy;
    va_copy(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
    append(vaBuf.rawContent(), vaBuf.length());
}

/// copies the entire buffer to shared memory
void
ShmWriter::copyToShm()
{
    Must(bufSize > 0); // do not use up shared memory pages for nothing
    Must(firstSlice < 0 || lastSlice >= 0);

    // fill, skip slices that are already full
    while (bufWritten < bufSize) {
        Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
        if (firstSlice < 0)
            firstSlice = lastSlice;
        copyToShmSlice(slice);
    }

    debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
}

/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
    const int64_t writingDebt = bufSize - bufWritten;
    const int64_t pageSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = totalWritten % pageSize;
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);

    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}

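// Worked example, assuming the default 32 KB Ipc::Mem::PageSize(): after
// totalWritten = 40960, sliceOffset = 40960 % 32768 = 8192, so at most
// 32768 - 8192 = 24576 more bytes fit into the current slice before
// copyToShm() must request another slice (and page) via nextAppendableSlice().
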
/* MemStore */

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no shared memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(MemStoreMapExtras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots:   %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots:      %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().find() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->createMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied)
        return e;

    debugs(20, 3, "failed for " << *e);
    map->freeEntry(index); // do not let others into the same trap
    destroyStoreEntry(static_cast<hash_link *>(e));
    return NULL;
}

void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update);
    }
}

void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const uint64_t staleHdrSz = update.entry->mem().baseReply().hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));

    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;

    map->closeForUpdating(update);
}

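// Header-update size math: if the stale entry stored S = swap_file_sz bytes,
// H of which were headers, and the fresh headers pack to H' bytes, the fresh
// anchor ends up with swap_file_sz = S - H + H'; the body slices after the
// splicing point are reused by the fresh chain rather than copied.
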
bool
MemStore::anchorToCache(StoreEntry &entry, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(entry.key), index);
    if (!slot)
        return false;

    anchorEntry(entry, index, *slot);
    inSync = updateAnchoredWith(entry, index, *slot);
    return true; // even if inSync is false
}

bool
MemStore::updateAnchored(StoreEntry &entry)
{
    if (!map)
        return false;

    assert(entry.mem_obj);
    assert(entry.hasMemStore());
    const sfileno index = entry.mem_obj->memCache.index;
    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateAnchoredWith(entry, index, anchor);
}

/// updates Transients entry after its anchor has been located
bool
MemStore::updateAnchoredWith(StoreEntry &entry, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    entry.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(entry, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    assert(!e.hasDisk()); // no conflict with disk entry basics
    anchor.exportInto(e);

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }

    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 8, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 8, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}

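// Concurrency note for the loop above: a writer may append more bytes to the
// slice we just copied and only then link slice.next, so a snapshot that
// turns out stale (wasSize < slice.size) makes us re-read the same slice
// before following slice.next; that is what keeps this lock-free copy from
// skipping bytes.
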
/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    const auto rep = &e.mem().adjustableBaseReply();
    if (rep->pstate < Http::Message::psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == Http::Message::psParsed);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

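// To summarize shouldCache(): an entry is admitted only when it is not
// already mem-cached (or being written), is memoryCachable(), has no Vary
// variants, fits maxObjectSize(), is contiguous in local memory, the map
// exists, and the entry is not ENTRY_SPECIAL.
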
/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    // Do not allow others to feed off an unknown-size entry because we will
    // stop swapping it out if it grows too large.
    if (e.mem_obj->expectedReplySize() >= 0)
        map->startAppending(index);
    e.memOutDecision(true);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    assert(map);
    assert(e.mem_obj);
    Must(!EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT));

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
            e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice;
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}

/// starts checking with the entry chain slice at a given offset and
/// returns a not-full (but not necessarily empty) slice, updating sliceOffset
Ipc::StoreMap::Slice &
MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
{
    // allocate the very first slot for the entry if needed
    if (sliceOffset < 0) {
        Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
        Must(anchor.start < 0);
        Ipc::Mem::PageId page;
        sliceOffset = reserveSapForWriting(page); // throws
        extras->items[sliceOffset].page = page;
        anchor.start = sliceOffset;
    }

    const size_t sliceCapacity = Ipc::Mem::PageSize();
    do {
        Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                sliceOffset = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = sliceOffset = reserveSapForWriting(page);
            extras->items[sliceOffset].page = page;
            debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
            continue; // to get and return the slice at the new sliceOffset
        }

        return slice;
    } while (true);
    /* not reached */
}

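// Slice chain sketch: anchor.start -> slice A -> slice B -> ... with each
// slice backed by exactly one shared memory page (recorded in extras->items).
// nextAppendableSlice() walks this chain to the first not-full slice,
// reserving a fresh slice and page when the tail slice is already full.
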
/// safely returns a previously allocated memory page for the given entry slice
Ipc::Mem::PageId
MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId)
{
    Must(extras);
    Must(sliceId >= 0);
    Ipc::Mem::PageId page = extras->items[sliceId].page;
    Must(page);
    return page;
}

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        const auto slotId = slot.number - 1;
        debugs(20, 5, "got a previously free slot: " << slotId);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            map->prepFreeSlice(slotId);
            return slotId;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slotId);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        const auto slotId = slot.number - 1;
        map->prepFreeSlice(slotId);
        debugs(20, 5, "got previously busy " << slotId << " and " << page);
        return slotId;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

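// Numbering note: PageId::number is 1-based (a zero number means "unset"),
// while StoreMap slice IDs are 0-based; hence slotId = slot.number - 1 above
// and the matching slotId.number = sliceId + 1 in noteFreeMapSlice() below.
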
void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = Ipc::Mem::PageStack::IdForMemStoreSpace();
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

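// memCache.io transitions driven by write(): ioUndecided -> ioWriting (after
// shouldCache() and startCaching() approve) or ioUndecided -> ioDone (refusal);
// ioWriting -> ioDone via completeWriting() on success or disconnect() on
// errors. Entries in ioReading or ioDone states are never written to.
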
void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

void
MemStore::evictCached(StoreEntry &e)
{
    debugs(47, 5, e);
    if (e.hasMemStore()) {
        if (map->freeEntry(e.mem_obj->memCache.index))
            CollapsedForwarding::Broadcast(e);
        if (!e.locked()) {
            disconnect(e);
            e.destroyMemObject();
        }
    } else if (const auto key = e.publicKey()) {
        // the entry may have been loaded and then disconnected from the cache
        evictIfFound(key);
        if (!e.locked())
            e.destroyMemObject();
    }
}

void
MemStore::evictIfFound(const cache_key *key)
{
    if (map)
        map->freeEntryByKey(key);
}

void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (e.hasMemStore()) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().stopSharing(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

bool
MemStore::Requested()
{
    return Config.memShared && Config.memMaxSize > 0;
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Requested())
        return 0;

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}

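// Worked example, assuming the default 32 KB Ipc::Mem::PageSize(): with
// "cache_mem 256 MB", EntryLimit() = 268435456 / 32768 = 8192, so the map,
// the free-slot stack, and the extras array below are each sized for 8192
// slots, one shared memory page per slot.
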
/// claims the shared memory pages that MemStore will need;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes the shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }

    if (MemStore::Requested() && Config.memMaxSize < Ipc::Mem::PageSize()) {
        debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" <<
               (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
               (Ipc::Mem::PageSize() / 1024.0) << " KB");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!MemStore::Enabled())
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    assert(entryLimit > 0);

    Ipc::Mem::PageStack::Config spaceConfig;
    spaceConfig.poolId = Ipc::Mem::PageStack::IdForMemStoreSpace();
    spaceConfig.pageSize = 0; // the pages are stored in Ipc::Mem::Pages
    spaceConfig.capacity = entryLimit;
    spaceConfig.createFull = true; // all pages are initially available
    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, spaceConfig);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}