MemStore.cc
/*
 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Memory Cache */

#include "squid.h"
#include "base/TextException.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "sbuf/SBuf.h"
#include "sbuf/Stream.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

/// Packs to shared memory, allocating new slots/pages as needed.
/// Requires an Ipc::StoreMapAnchor locked for writing.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    void append(const char *aBuf, int aSize) override;
    void vappendf(const char *fmt, va_list ap) override;

public:
    StoreEntry *entry; ///< the entry being updated

    /// the slot keeping the first byte of the appended content (at least)
    /// either set by the caller or allocated by the writer
    Ipc::StoreMapSliceId firstSlice;

    /// the slot keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far

protected:
    void copyToShm();
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);

private:
    MemStore &store;
    const sfileno fileNo;

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};
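
// ShmWriter is handed to HttpReply::packHeadersUsingSlowPacker() by
// updateHeadersOrThrow() below: each append() call lands directly in
// shared-memory slices, while totalWritten accumulates the fresh header
// size that is later used to adjust the anchor's swap_file_sz.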

/* ShmWriter */

ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
    entry(anEntry),
    firstSlice(aFirstSlice),
    lastSlice(firstSlice),
    totalWritten(0),
    store(aStore),
    fileNo(aFileNo),
    buf(nullptr),
    bufSize(0),
    bufWritten(0)
{
    Must(entry);
}

void
ShmWriter::append(const char *aBuf, int aBufSize)
{
    Must(!buf);
    buf = aBuf;
    bufSize = aBufSize;
    if (bufSize) {
        Must(buf);
        bufWritten = 0;
        copyToShm();
    }
    buf = nullptr;
    bufSize = 0;
    bufWritten = 0;
}

void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
    va_list apCopy;
    va_copy(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
    append(vaBuf.rawContent(), vaBuf.length());
}

/// copies the entire buffer to shared memory
void
ShmWriter::copyToShm()
{
    Must(bufSize > 0); // do not use up shared memory pages for nothing
    Must(firstSlice < 0 || lastSlice >= 0);

    // fill, skip slices that are already full
    while (bufWritten < bufSize) {
        Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
        if (firstSlice < 0)
            firstSlice = lastSlice;
        copyToShmSlice(slice);
    }

    debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
}

/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
    const int64_t writingDebt = bufSize - bufWritten;
    const int64_t pageSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = totalWritten % pageSize;
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);

    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}
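
// Example of the arithmetic above, assuming hypothetical 32 KB pages: with
// totalWritten = 70000, the current slice already holds 70000 % 32768 = 4464
// bytes, so at most 32768 - 4464 = 28304 more bytes fit before copyToShm()
// must move on to the next slice.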

/* MemStore */

MemStore::MemStore(): map(nullptr), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no shared memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(MemStoreMapExtras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}
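
// init() only attaches to existing segments via shm_old(); the segments
// themselves (SpaceLabel, ExtrasLabel, and the MapLabel map) are created
// earlier, during startup, by MemStoreRr::create() near the end of this file.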

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots:   %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<int>(slotsFree);
                storeAppendPrintf(&e, "Used slots:      %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}
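
// The effective mem-cache object ceiling is thus the smaller of the
// maximum_object_size_in_memory and cache_mem directives: e.g., with a
// hypothetical "maximum_object_size_in_memory 512 KB" and "cache_mem 256 MB",
// no object larger than 512 KB is admitted by shouldCache() below.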

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return nullptr;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return nullptr;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    try {
        // XXX: We do not know the URLs yet, only the key, but we need to parse and
        // store the response for the Root().find() callers to be happy because they
        // expect IN_MEMORY entries to already have the response headers and body.
        e->createMemObject();

        anchorEntry(*e, index, *slot);

        // TODO: make copyFromShm() throw on all failures, simplifying this code
        if (copyFromShm(*e, index, *slot))
            return e;
        debugs(20, 3, "failed for " << *e);
    } catch (...) {
        // see store_client::parseHttpHeadersFromDisk() for problems this may log
        debugs(20, DBG_IMPORTANT, "ERROR: Cannot load a cache hit from shared memory" <<
               Debug::Extra << "exception: " << CurrentException <<
               Debug::Extra << "cache_mem entry: " << *e);
    }

    map->freeEntry(index); // do not let others into the same trap
    destroyStoreEntry(static_cast<hash_link *>(e));
    return nullptr;
}

void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update);
    }
}

void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const uint64_t staleHdrSz = update.entry->mem().baseReply().hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));

    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;

    map->closeForUpdating(update);
}

bool
MemStore::anchorToCache(StoreEntry &entry)
{
    Assure(!entry.hasMemStore());
    Assure(entry.mem().memCache.io != MemObject::ioDone);

    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(entry.key), index);
    if (!slot)
        return false;

    anchorEntry(entry, index, *slot);
    if (!updateAnchoredWith(entry, index, *slot))
        throw TextException("updateAnchoredWith() failure", Here());
    return true;
}

bool
MemStore::updateAnchored(StoreEntry &entry)
{
    if (!map)
        return false;

    assert(entry.mem_obj);
    assert(entry.hasMemStore());
    const sfileno index = entry.mem_obj->memCache.index;
    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateAnchoredWith(entry, index, anchor);
}

/// updates Transients entry after its anchor has been located
bool
MemStore::updateAnchoredWith(StoreEntry &entry, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    entry.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(entry, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    assert(!e.hasDisk()); // no conflict with disk entry basics
    anchor.exportInto(e);

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }

    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;

    SBuf httpHeaderParsingBuffer;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 8, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);

            copyFromShmSlice(e, sliceBuf);
            debugs(20, 8, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);

            // parse headers if needed; they might span multiple slices!
            auto &reply = e.mem().adjustableBaseReply();
            if (reply.pstate != Http::Message::psParsed) {
                httpHeaderParsingBuffer.append(sliceBuf.data, sliceBuf.length);
                if (reply.parseTerminatedPrefix(httpHeaderParsingBuffer.c_str(), httpHeaderParsingBuffer.length()))
                    httpHeaderParsingBuffer = SBuf(); // we do not need these bytes anymore
            }
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    if (anchor.writerHalted) {
        debugs(20, 5, "mem-loaded aborted " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return false;
    }

    debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    if (e.mem().baseReply().pstate != Http::Message::psParsed)
        throw TextException(ToSBuf("truncated mem-cached headers; accumulated: ", httpHeaderParsingBuffer.length()), Here());

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}
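
// The copy loop above tolerates a concurrent writer: it snapshots slice.size
// (wasSize) before copying and advances to slice.next only after the snapshot
// proves the slice stopped growing; otherwise the same slice is re-read on
// the next iteration.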

/// imports one shared memory slice into local memory
void
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
}

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (shutting_down) {
        debugs(20, 5, "avoid heavy optional work during shutdown: " << e);
        return false;
    }

    // To avoid SMP workers releasing each other caching attempts, restrict disk
    // caching to StoreEntry publisher. This check goes before memoryCachable()
    // that may incorrectly release() publisher's entry via checkCachable().
    if (Store::Root().transientsReader(e)) {
        debugs(20, 5, "yield to entry publisher: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    // Do not allow others to feed off an unknown-size entry because we will
    // stop swapping it out if it grows too large.
    if (e.mem_obj->expectedReplySize() >= 0)
        map->startAppending(index);
    e.memOutDecision(true);
    return true;
}

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    assert(map);
    assert(e.mem_obj);
    Must(!EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT));

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
            e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice;
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}

/// returns a slice with free space for appending to the given entry,
/// allocating new slices (and their pages) as needed; throws on failures
Ipc::StoreMap::Slice &
MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
{
    // allocate the very first slot for the entry if needed
    if (sliceOffset < 0) {
        Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
        Must(anchor.start < 0);
        Ipc::Mem::PageId page;
        sliceOffset = reserveSapForWriting(page); // throws
        extras->items[sliceOffset].page = page;
        anchor.start = sliceOffset;
    }

    const size_t sliceCapacity = Ipc::Mem::PageSize();
    do {
        Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                sliceOffset = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = sliceOffset = reserveSapForWriting(page);
            extras->items[sliceOffset].page = page;
            debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
            continue; // to get and return the slice at the new sliceOffset
        }

        return slice;
    } while (true);
    /* not reached */
}

/// safely returns a previously allocated memory page for the given entry slice
Ipc::Mem::PageId
MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId)
{
    Must(extras);
    Must(sliceId >= 0);
    Ipc::Mem::PageId page = extras->items[sliceId].page;
    Must(page);
    return page;
}

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        const auto slotId = slot.number - 1;
        debugs(20, 5, "got a previously free slot: " << slotId);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            map->prepFreeSlice(slotId);
            return slotId;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slotId);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        const auto slotId = slot.number - 1;
        map->prepFreeSlice(slotId);
        debugs(20, 5, "got previously busy " << slotId << " and " << page);
        return slotId;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = nullptr;
    waitingFor.page = nullptr;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}
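
// reserveSapForWriting() cooperates with noteFreeMapSlice() below through
// waitingFor: while map->purgeOne() evicts a victim entry, the map cleaner
// callback hands each freed slot and page directly to the waiting caller
// instead of pushing them onto the shared freeSlots stack.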

void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = Ipc::Mem::PageStack::IdForMemStoreSpace();
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = nullptr;
        waitingFor.page = nullptr;
        pageId = Ipc::Mem::PageId();
    }
}

void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index);

    CollapsedForwarding::Broadcast(e);
    e.storeWriterDone();
}

void
MemStore::evictCached(StoreEntry &e)
{
    debugs(47, 5, e);
    if (e.hasMemStore()) {
        if (map->freeEntry(e.mem_obj->memCache.index))
            CollapsedForwarding::Broadcast(e);
        if (!e.locked()) {
            disconnect(e);
            e.destroyMemObject();
        }
    } else if (const auto key = e.publicKey()) {
        // the entry may have been loaded and then disconnected from the cache
        evictIfFound(key);
        if (!e.locked())
            e.destroyMemObject();
    }
}

void
MemStore::evictIfFound(const cache_key *key)
{
    if (map)
        map->freeEntryByKey(key);
}

void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (e.hasMemStore()) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            CollapsedForwarding::Broadcast(e, true);
            e.storeWriterDone();
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

bool
MemStore::Requested()
{
    return Config.memShared && Config.memMaxSize > 0;
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Requested())
        return 0;

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
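
// Worked example with hypothetical values: "cache_mem 256 MB" and a 32 KB
// shared page size give EntryLimit() = 268435456 / 32768 = 8192, i.e. at most
// 8192 cached entries, each starting as a single one-page slice.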

/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(nullptr), mapOwner(nullptr), extrasOwner(nullptr) {}
    void finalizeConfig() override;
    void claimMemoryNeeds() override;
    void useConfig() override;
    ~MemStoreRr() override;

protected:
    /* Ipc::Mem::RegisteredRunner API */
    void create() override;

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

DefineRunnerRegistrator(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }

    if (MemStore::Requested() && Config.memMaxSize < Ipc::Mem::PageSize()) {
        debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" <<
               (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
               (Ipc::Mem::PageSize() / 1024.0) << " KB");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!MemStore::Enabled())
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    assert(entryLimit > 0);

    Ipc::Mem::PageStack::Config spaceConfig;
    spaceConfig.poolId = Ipc::Mem::PageStack::IdForMemStoreSpace();
    spaceConfig.pageSize = 0; // the pages are stored in Ipc::Mem::Pages
    spaceConfig.capacity = entryLimit;
    spaceConfig.createFull = true; // all pages are initially available
    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, spaceConfig);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}