61 debugs(54, 5, "attached " << path << " with " <<
71 const Anchor &inode = anchorAt(fileno);
79 return diff < 0 ? -1 : +1;
87 Anchor &inode = anchorAt(fileno);
100 debugs(54, 8, "closed entry " << fileno << " for writing " << path);
107 << " for reading " << path);
110 auto idx = fileNoByKey(key);
111 if (const auto anchor = openForReadingAt(idx, key)) {
117 idx = fileNoByKey(key);
118 if (auto anchor = openForWritingAt(idx)) {
120 anchor->lock.switchExclusiveToShared();
122 assert(anchor->complete());
124 debugs(54, 5, "switched entry " << fileno << " from writing to reading " << path);
130 idx = fileNoByKey(key);
131 if (const auto anchor = openForReadingAt(idx, key)) {
144 << " for writing " << path);
145 const int idx = fileNoByKey(key);
147 if (Anchor *anchor = openForWritingAt(idx)) {
158 Anchor &s = anchorAt(fileno);
167 debugs(54, 5, "cannot open existing entry " << fileno <<
168        " for writing " << path);
174 freeChain(fileno, s, true);
182 debugs(54, 5, "opened entry " << fileno << " for writing " << path);
186 debugs(54, 5, "cannot open busy entry " << fileno <<
187        " for writing " << path);
194 Anchor &s = anchorAt(fileno);
197 debugs(54, 5, "restricted entry " << fileno << " to appending " << path);
203 Anchor &s = anchorAt(fileno);
207 debugs(54, 5, "closed entry " << fileno << " for writing " << path);
214 debugs(54, 5, "switching entry " << fileno << " from writing to reading " << path);
215 Anchor &s = anchorAt(fileno);
224 assert(anchorAt(anchorId).writing());
225 assert(validSlice(sliceId));
226 return sliceAt(sliceId);
232 assert(anchorAt(anchorId).reading());
233 assert(validSlice(sliceId));
234 return sliceAt(sliceId);
240 assert(anchorAt(anchorId).writing());
241 return anchorAt(anchorId);
247 assert(anchorAt(anchorId).reading());
248 return anchorAt(anchorId);
254 debugs(54, 5, "aborting entry " << fileno << " for writing " << path);
255 Anchor &s = anchorAt(fileno);
258 freeChain(fileno, s, false);
259 debugs(54, 5, "closed idle entry " << fileno << " for writing " << path);
264 debugs(54, 5, "closed busy entry " << fileno << " for writing " << path);
272 debugs(54, 5, "aborting entry " << fileno << " for updating " << path);
283 debugs(54, 5, "aborted entry " << fileno << " for updating " << path);
289 const Anchor &s = anchorAt(fileno);
299 const Anchor &s = anchorAt(fileno);
309 return anchorAt(fileno);
315 debugs(54, 5, "marking entry " << fileno << " to be freed in " << path);
317 Anchor &s = anchorAt(fileno);
321 freeChain(fileno, s, false);
325 uint8_t expected = false;
334 << " to be freed in " << path);
336 const int idx = fileNoByKey(key);
337 Anchor &s = anchorAt(idx);
340 freeChain(idx, s, true);
357 const int idx = fileNoByKey(key);
358 const Anchor &s = anchorAt(idx);
366 if (openForReading(reinterpret_cast<const cache_key*>(key), index)) {
367 closeForReading(index);
377 debugs(54, 7, "freeing entry " << fileno <<
386 debugs(54, 5, "freed entry " << fileno << " in " << path);
393 static uint64_t ChainId = 0;
394 const uint64_t chainId = ++ChainId;
395 debugs(54, 7, "freeing chain #" << chainId << " starting at " << sliceId << " in " << path);
396 while (sliceId >= 0) {
397 Slice &slice = sliceAt(sliceId);
401 cleaner->noteFreeMapSlice(sliceId);
402 if (sliceId == splicingPoint) {
403 debugs(54, 5, "preserving chain #" << chainId << " in " << path <<
404        " suffix after slice " << splicingPoint);
409 debugs(54, 7, "freed chain #" << chainId << " in " << path);
416 assert(validSlice(sliceId));
417 sliceAt(sliceId).clear();
423 const Anchor &anchor = anchorAt(fileno);
425 uint64_t bytesSeen = 0;
427 while (lastSlice >= 0) {
428 const Slice &slice = sliceAt(lastSlice);
429 bytesSeen += slice.size;
430 if (bytesSeen >= bytesNeeded)
432 lastSlice = slice.next;
434 debugs(54, 7, "entry " << fileno << " has " << bytesNeeded << '/' << bytesSeen <<
435        " bytes at slice " << lastSlice << " in " << path);
443 << " for reading " << path);
444 const int idx = fileNoByKey(key);
445 if (const auto anchor = openForReadingAt(idx, key)) {
455 debugs(54, 5, "opening entry " << fileno << " for reading " << path);
456 Anchor &s = anchorAt(fileno);
459 debugs(54, 5, "cannot open busy entry " << fileno <<
460        " for reading " << path);
466 debugs(54, 7, "cannot open empty entry " << fileno <<
467        " for reading " << path);
473 debugs(54, 7, "cannot open marked entry " << fileno <<
474        " for reading " << path);
480 debugs(54, 5, "cannot open wrong-key entry " << fileno <<
481        " for reading " << path);
487 debugs(54, 5, "cannot open corrupted entry " << fileno <<
488        " for reading " << path);
492 debugs(54, 5, "opened entry " << fileno << " for reading " << path);
499 Anchor &s = anchorAt(fileno);
502 debugs(54, 5, "closed entry " << fileno << " for reading " << path);
508 auto &s = anchorAt(fileno);
511 if (!s.lock.unlockSharedAndSwitchToExclusive()) {
512 debugs(54, 5, "closed entry " << fileno << " for reading " << path);
518 freeChain(fileno, s, false);
519 debugs(54, 5, "closed idle entry " << fileno << " for reading " << path);
530 if (!validEntry(fileNoHint)) {
532 " for updating " << path);
538 debugs(54, 5, "opening entry " << update.stale.fileNo << " of " << entry << " for updating " << path);
543 debugs(54, 5, "cannot open unreadable entry " << update.stale.fileNo << " for updating " << path);
554 " for updating " << path);
561 " for updating " << path);
568 if (!openKeyless(update.fresh)) {
570 " for updating " << path);
571 abortUpdating(update);
578 debugs(54, 5, "opened entry " << update.stale.fileNo << " for updating " << path <<
579        " using entry " << update.fresh.fileNo << " of " << entry);
589 return visitVictims([&](const sfileno name) {
595 " for updating " << path);
625 if (freshSplicingSlice.next < 0)
626 freshSplicingSlice.next = suffixStart;
628 Must(freshSplicingSlice.next == suffixStart);
662 const Update updateSaved = update;
674 " named " << updateSaved.
stale.
name <<
" for updating " << path <<
677 "] prefix containing at least " << freshSplicingSlice.
size <<
" bytes");
689 const int searchLimit =
min(10000, entryLimit());
691 for (; tries < searchLimit; ++tries) {
692 const sfileno name =
static_cast<sfileno>(++anchors->victim % entryLimit());
697 debugs(54, 5,
"no victims found in " << path <<
"; tried: " << tries);
704 return visitVictims([&](const sfileno name) {
705 const sfileno fileno = fileNoByName(name);
706 Anchor &s = anchorAt(fileno);
711 freeChain(fileno, s, false);
712 debugs(54, 5, "purged entry " << fileno << " from " << path);
728 assert(validSlice(sliceId));
729 sliceAt(sliceId) = slice;
741 return anchors->count;
747 return slices->capacity;
753 for (int i = 0; i < anchors->capacity; ++i)
754 anchorAt(i).lock.updateStats(stats);
760 return 0 <= pos && pos < entryLimit();
766 return 0 <= pos && pos < sliceLimit();
775 typedef std::chrono::high_resolution_clock Clock;
778 startTime(Clock::now()),
780 maxTime(startTime + max) {}
784 const auto currentTime = Clock::now();
785 if (currentTime < lastTime)
787 lastTime = currentTime;
788 return lastTime > maxTime;
806 const auto &anchor = anchorAt(fileno);
810 if (!anchor.basics.swap_file_sz) {
815 if (!anchor.lock.lockHeaders()) {
820 const uint64_t expectedByteCount = anchor.basics.swap_file_sz;
822 size_t actualSliceCount = 0;
823 uint64_t actualByteCount = 0;
824 SliceId lastSeenSlice = anchor.start;
825 while (lastSeenSlice >= 0) {
827 if (!validSlice(lastSeenSlice))
829 const auto &slice = sliceAt(lastSeenSlice);
830 actualByteCount += slice.size;
831 if (actualByteCount > expectedByteCount)
833 lastSeenSlice = slice.next;
834 if (timeIsLimited && timer.expired()) {
835 anchor.lock.unlockHeaders();
841 anchor.lock.unlockHeaders();
843 if (actualByteCount == expectedByteCount && lastSeenSlice < 0)
850 " expected swap_file_sz=" << expectedByteCount <<
851 " actual swap_file_sz=" << actualByteCount <<
852 " actual slices=" << actualSliceCount <<
853 " last slice seen=" << lastSeenSlice <<
"\n" <<
855 " tmestmp=" << anchor.basics.timestamp <<
"\n" <<
856 " lastref=" << anchor.basics.lastref <<
"\n" <<
857 " expires=" << anchor.basics.expires <<
"\n" <<
858 " lastmod=" << anchor.basics.lastmod <<
"\n" <<
859 " refcount=" << anchor.basics.refcount <<
"\n" <<
860 " flags=0x" <<
asHex(anchor.basics.flags) <<
"\n" <<
861 " start=" << anchor.start <<
"\n" <<
862 " splicingPoint=" << anchor.splicingPoint <<
"\n" <<
863 " lock=" << anchor.lock <<
"\n" <<
864 " waitingToBeFreed=" << (anchor.waitingToBeFreed ? 1 : 0) <<
"\n"
873 assert(validEntry(fileno));
874 return anchors->items[fileno];
887 const uint64_t *const k = reinterpret_cast<const uint64_t *>(key);
889 const int hash = (k[0] + k[1]) % entryLimit();
898 if (const int item = fileNos->items[name])
908 fileNos->items[name] = fileno+1;
914 const int name = nameByKey(key);
915 return fileNoByName(name);
921 return anchorAt(fileNoByKey(key));
927 assert(validSlice(sliceId));
928 return slices->items[sliceId];
947 memcpy(key, aKey, sizeof(key));
954 const uint64_t *const k = reinterpret_cast<const uint64_t *>(aKey);
955 return k[0] == key[0] && k[1] == key[1];
961 assert(writing() && !reading());
962 setKey(reinterpret_cast<const cache_key*>(aKey ? aKey : from.key));
972 uint16_t cleanFlags = from.flags;
975 basics.flags = cleanFlags;
992 into.flags = basics.flags;
996 if (collapsingRequired)
1008 memset(&key, 0, sizeof(key));
1010 waitingToBeFreed = false;
1011 writerHalted = false;
1033 entry->unlock("Ipc::StoreMapUpdate");
1057 capacity(aCapacity),
1065 return SharedMemorySize(capacity);
#define EBIT_CLR(flag, bit)
Anchor & writeableEntry(const AnchorId anchorId)
writeable anchor for the entry created by openForWriting()
void freeEntryByKey(const cache_key *const key)
approximate stats of a set of ReadWriteLocks
void closeForWriting(const sfileno fileno)
successfully finish creating or updating the entry at fileno pos
void startAppending(const sfileno fileno)
restrict an entry opened for writing to appending operations; allow reads
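The writer-side calls above are meant to be used as a sequence: openForWriting(), optionally startAppending(), then closeForWriting(). A usage sketch, not code from Squid; the "ipc/StoreMap.h" include path is assumed, and slice allocation and payload copying are elided.

    #include "ipc/StoreMap.h" // assumed Squid header providing Ipc::StoreMap

    // Sketch of the writer life cycle: create the entry, optionally admit readers
    // while still appending, then publish the finished entry.
    bool storeObject(Ipc::StoreMap &map, const cache_key *key)
    {
        sfileno fileno = -1;
        Ipc::StoreMap::Anchor *anchor = map.openForWriting(key, fileno);
        if (!anchor)
            return false; // the entry is busy or cannot be created

        // ... allocate slices, fill them via map.writeableSlice(fileno, sliceId),
        // and link them into the chain that begins at anchor->start ...

        map.startAppending(fileno);  // optional: allow reads while still writing
        map.closeForWriting(fileno); // successfully finish creating the entry
        return true;
    }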
uint64_t refusalsDueToZeroSize
uint64_t refusalsDueToLocking
void importSlice(const SliceId sliceId, const Slice &slice)
copies slice to its designated position
unsigned char cache_key
Store key.
#define EBIT_SET(flag, bit)
bool lockShared()
lock for reading or return false
Clock::time_point startTime
the object creation time
void lock(const char *context)
StoreMap(const SBuf &aPath)
bool sameKey(const cache_key *const aKey) const
void relocate(const sfileno name, const sfileno fileno)
map name to fileNo
bool lockExclusive()
lock for modification or return false
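To make the lockShared()/lockExclusive() contract above concrete: both are try-locks that either acquire the lock immediately or return false. Below is a minimal, self-contained model of that behaviour; it is not Squid's ReadWriteLock, which additionally tracks appending writers and header updates.

    // Minimal try-lock model: readers register optimistically and back off if a
    // writer holds the lock; a writer claims the lock and backs off if readers
    // are still active. With sequentially consistent atomics the two sides may
    // both fail, but never both succeed at the same time.
    #include <atomic>

    class MiniRwLock
    {
    public:
        bool lockShared() {
            ++readers;                // optimistic reader registration
            if (writing.load()) {     // a writer owns the lock; undo and fail
                --readers;
                return false;
            }
            return true;
        }

        bool lockExclusive() {
            bool expected = false;
            if (!writing.compare_exchange_strong(expected, true))
                return false;         // another writer got here first
            if (readers.load() > 0) { // readers are still active; undo and fail
                writing = false;
                return false;
            }
            return true;
        }

        void unlockShared() { --readers; }
        void unlockExclusive() { writing = false; }

    private:
        std::atomic<int> readers{0};
        std::atomic<bool> writing{false};
    };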
size_t sharedMemorySize() const
void set(const StoreEntry &anEntry, const cache_key *aKey=nullptr)
store StoreEntry key and basics for an inode slot
Edition fresh
new anchor and the updated chain prefix
std::atomic< bool > appending
the writer has promised to only append
void forgetWritingEntry(const sfileno fileno)
const Anchor * peekAtReader(const sfileno fileno) const
SliceId sliceContaining(const sfileno fileno, const uint64_t nth) const
Mem::Pointer< StoreMapAnchors > anchors
entry inodes (starting blocks)
const A & max(A const &lhs, A const &rhs)
std::atomic_flag updating
a reader is updating metadata/headers
ReadWriteLock lock
protects slot data below
const Anchor * openForReadingAt(const sfileno, const cache_key *const)
opens entry (identified by sfileno) for reading, increments read level
void freeChain(const sfileno fileno, Anchor &inode, const bool keepLock)
unconditionally frees an already locked chain of slots, unlocking if needed
void unlockShared()
undo successful sharedLock()
bool validEntry(const int n) const
whether n is a valid entry coordinate
const Anchor * openOrCreateForReading(const cache_key *, sfileno &)
openForReading() but creates a new entry if there is no old one
bool expired()
whether the current time reached the provided maximum time
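The expired() logic can be reassembled from the ConservativeTimer fragments in the listing above: the timer tolerates a wall clock that jumps backwards and reports expiry only once the deadline has genuinely passed. A self-contained reconstruction follows; the early return on a backwards jump is an assumption, since that line is missing from the fragment.

    #include <chrono>

    class ConservativeTimer
    {
    public:
        typedef std::chrono::high_resolution_clock Clock;

        explicit ConservativeTimer(const Clock::duration max):
            startTime(Clock::now()),
            lastTime(startTime),
            maxTime(startTime + max) {}

        // whether the current time reached the provided maximum time
        bool expired() {
            const auto currentTime = Clock::now();
            if (currentTime < lastTime) // the clock went backwards; assumed: ignore it
                return false;
            lastTime = currentTime;
            return lastTime > maxTime;
        }

    private:
        Clock::time_point startTime;     // the object creation time
        Clock::time_point lastTime;      // the time of the last expired() call
        const Clock::time_point maxTime; // past this point, expired() becomes true
    };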
std::chrono::nanoseconds paranoid_hit_validation
Anchor & anchorAt(const sfileno fileno)
void updateStats(ReadWriteLockStats &stats) const
adds approximate current stats to the supplied ones
void prepFreeSlice(const SliceId sliceId)
prepare a chain-unaffiliated slice for being added to an entry chain
Mem::Pointer< StoreMapSlices > slices
chained entry pieces positions
static size_t SharedMemorySize(const int anAnchorLimit)
static Owner * Init(const SBuf &path, const int slotLimit)
initialize shared memory
Mem::Pointer< StoreMapFileNos > fileNos
entry name to inode (fileno) mapping
std::atomic< uint8_t > writerHalted
whether StoreMap::abortWriting() was called for a read-locked entry
void unlockExclusive()
undo successful exclusiveLock()
std::atomic< StoreMapSliceId > start
where the chain of StoreEntry slices begins [app]
StoreMapUpdate(StoreEntry *anEntry)
std::atomic< StoreMapSliceId > splicingPoint
void closeForUpdating(Update &update)
makes updated info available to others, unlocks, and cleans up
const Anchor * openForReading(const cache_key *const key, sfileno &fileno)
opens entry (identified by key) for reading, increments read level
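Together with readableSlice() and closeForReading(), the call above forms the read path. A usage sketch, not Squid code ("ipc/StoreMap.h" is an assumed include path); it walks the slice chain the same way the knownSize and validateHit fragments in the listing do.

    #include <cstdint>
    #include "ipc/StoreMap.h" // assumed Squid header providing Ipc::StoreMap

    // Sketch of the reader life cycle: open by key, walk the slice chain from the
    // anchor, and release the read lock when done.
    uint64_t readObjectSize(Ipc::StoreMap &map, const cache_key *key)
    {
        sfileno fileno = -1;
        const Ipc::StoreMap::Anchor *anchor = map.openForReading(key, fileno);
        if (!anchor)
            return 0; // absent, busy, or marked for deletion

        uint64_t bytes = 0;
        for (auto sliceId = anchor->start.load(); sliceId >= 0;) {
            const auto &slice = map.readableSlice(fileno, sliceId);
            bytes += slice.size;  // slice contents size
            sliceId = slice.next; // ID of the next entry slice
        }

        map.closeForReading(fileno); // decrement the read level
        return bytes;
    }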
std::atomic< StoreMapSliceId > next
ID of the next entry slice.
struct Ipc::StoreMapAnchor::Basics basics
const Anchor & readableEntry(const AnchorId anchorId) const
readable anchor for the entry created by openForReading()
int entryLimit() const
maximum entryCount() possible
Edition stale
old anchor and chain
bool openKeyless(Update::Edition &edition)
int sliceLimit() const
maximum number of slices possible
bool validateHit(const sfileno)
void closeForReading(const sfileno fileno)
closes open entry after reading, decrements read level
AsHex< Integer > asHex(const Integer n)
a helper to ease AsHex object creation
bool markedForDeletion(const cache_key *key) const
StoreEntry * entry
the store entry being updated
void setKey(const cache_key *const aKey)
bool validSlice(const int n) const
whether n is a valid slice coordinate
void rewind()
undo the effects of set(), setKey(), etc., but keep locks and state
const SBuf path
cache_dir path or similar cache name; for logging
bool lockHeaders()
lock for [readable] metadata update or return false
int entryCount() const
number of writeable and readable entries
Aggregates information required for updating entry metadata and headers.
std::atomic< uint8_t > waitingToBeFreed
StoreMapAnchor * anchor
StoreMap::anchors[fileNo], for convenience/speed.
sfileno nameByKey(const cache_key *const key) const
computes entry name (i.e., key hash) for a given entry key
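The listing shows how this hash and the related lookups work: the 128-bit cache key is read as two 64-bit words, their sum modulo entryLimit() is the entry name, and the fileNos table stores fileno+1 so that zero can mean "not relocated" (in which case the name doubles as the anchor position). A stand-alone model follows; the array size, the memcpy-based read, and the main() driver are illustrative choices, not Squid's code.

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    constexpr int EntryLimit = 1024;     // stand-in for entryLimit()
    static int FileNos[EntryLimit] = {}; // stand-in for fileNos->items; holds fileno+1

    // key hash: sum of the two 64-bit halves of the 128-bit key, modulo the limit
    int nameByKey(const unsigned char *key) {
        uint64_t k[2];
        std::memcpy(k, key, sizeof(k));  // avoids the aliasing pitfalls of a raw cast
        return static_cast<int>((k[0] + k[1]) % EntryLimit);
    }

    // anchor position for a name: relocated entries store fileno+1; zero means "use the name"
    int fileNoByName(const int name) {
        if (const int item = FileNos[name])
            return item - 1;
        return name;
    }

    // map name to fileNo (mirrors relocate() above)
    void relocate(const int name, const int fileno) {
        FileNos[name] = fileno + 1;
    }

    int main() {
        unsigned char key[16] = {1, 2, 3}; // a made-up 128-bit store key
        const int name = nameByKey(key);
        relocate(name, 7);
        std::cout << "name=" << name << " fileno=" << fileNoByName(name) << "\n";
        return 0;
    }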
int compareVersions(const sfileno oldFileno, time_t newVersion) const
Anchor * openForWriting(const cache_key *const key, sfileno &fileno)
Slice & sliceAt(const SliceId sliceId)
void clear()
restore default-constructed state
static SBuf Name(const SBuf &prefix, const char *suffix)
concatenates parts of a name to form a complete name (or its prefix)
bool freeEntry(const sfileno)
Anchor & anchorByKey(const cache_key *const key)
Clock::time_point lastTime
the time of the last expired() call, initially equal to startTime
std::chrono::high_resolution_clock Clock
During an update, the stored entry has two editions: stale and fresh.
ConservativeTimer(const Clock::duration max)
sfileno fileNoByName(const sfileno name) const
computes anchor position for a given entry name
bool hasReadableEntry(const cache_key *const)
whether the index contains a valid readable entry with the given key
void unlockHeaders()
undo successful lockHeaders()
void exportInto(StoreEntry &) const
load StoreEntry basics that were previously stored with set()
void AssertFlagIsSet(std::atomic_flag &flag)
StoreMapSliceId splicingPoint
the last slice in the chain still containing metadata/headers
const Slice & readableSlice(const AnchorId anchorId, const SliceId sliceId) const
readable slice within an entry chain opened by openForReading()
const Anchor * peekAtWriter(const sfileno fileno) const
sfileno fileNo
StoreMap::fileNos[name], for convenience/speed.
static SBuf StoreMapFileNosId(const SBuf &path)
uint64_t refusalsDueToTimeLimit
const Clock::time_point maxTime
after going past this point in time, expired() becomes true
bool stopAppendingAndRestoreExclusive()
bool purgeOne()
either finds and frees an entry with at least 1 slice or returns false
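purgeOne() builds on visitVictims(), the round-robin eviction scan visible in the listing: advance a shared victim cursor, try up to min(10000, entryLimit()) names, and stop at the first name the filter accepts. A compilable model with stand-in types; the cursor, the limit, and the filter signature mirror the listing, everything else is illustrative.

    #include <algorithm>
    #include <atomic>
    #include <cstdint>
    #include <functional>

    using sfileno = int32_t;

    constexpr int entryLimit() { return 4096; } // stand-in for StoreMap::entryLimit()
    static std::atomic<uint32_t> victim{0};     // stand-in for anchors->victim

    // Try a bounded number of candidate names, starting after the shared cursor,
    // and report whether the filter accepted (and handled) one of them.
    bool visitVictims(const std::function<bool(const sfileno)> &filter)
    {
        const int searchLimit = std::min(10000, entryLimit());
        for (int tries = 0; tries < searchLimit; ++tries) {
            const auto name = static_cast<sfileno>(++victim % entryLimit());
            if (filter(name))
                return true;
        }
        return false; // no suitable victim among the tried names
    }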
StoreMapAnchors(const int aCapacity)
bool markedForDeletion(const cache_key *const)
const Anchor & peekAtEntry(const sfileno fileno) const
void closeForReadingAndFreeIdle(const sfileno fileno)
same as closeForReading() but also frees the entry if it is unlocked
const char * storeKeyText(const cache_key *key)
struct StatCounters::@121 hitValidation
sfileno name
StoreEntry position in StoreMap::fileNos, for swapping Editions.
void abortWriting(const sfileno fileno)
stop writing the entry, freeing its slot for others to use if possible
void lastModified(const time_t when)
@ ENTRY_REQUIRES_COLLAPSING
std::atomic< Size > size
slice contents size
bool hittingRequiresCollapsing() const
whether this entry can feed collapsed requests and only them
static SBuf StoreMapAnchorsId(const SBuf &path)
bool visitVictims(const NameFilter filter)
Anchor * openForWritingAt(sfileno fileno, bool overwriteExisting=true)
void switchExclusiveToShared()
static SBuf StoreMapSlicesId(const SBuf &path)
void abortUpdating(Update &update)
undoes partial update, unlocks, and cleans up
bool openForUpdating(Update &update, sfileno fileNoHint)
finds and locks the Update entry for an exclusive metadata update
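A usage sketch of the metadata update cycle built from openForUpdating(), closeForUpdating(), and abortUpdating() above. This is not Squid code: the include path, the call site, and the wroteFreshPrefix flag are assumptions used only to show the intended order of calls.

    #include "ipc/StoreMap.h" // assumed Squid header providing Ipc::StoreMap

    // Sketch: lock both editions of the entry, write the fresh prefix, then
    // either publish the update or roll it back.
    void refreshHeaders(Ipc::StoreMap &map, StoreEntry *entry, const sfileno fileNoHint)
    {
        Ipc::StoreMapUpdate update(entry);
        if (!map.openForUpdating(update, fileNoHint))
            return; // the entry is gone, or somebody else is writing/updating it

        bool wroteFreshPrefix = false;
        // ... append the updated metadata/headers to update.fresh ...

        if (wroteFreshPrefix)
            map.closeForUpdating(update); // splice the fresh prefix onto the stale suffix
        else
            map.abortUpdating(update);    // undo the partial update, unlock, and clean up
    }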
void freeChainAt(SliceId sliceId, const SliceId splicingPoint)
unconditionally frees an already locked chain of slots; no anchor maintenance
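The freeing loop itself appears in the listing ("freeing chain #...", "preserving chain #... suffix after slice ..."). A minimal stand-alone model of that walk, using simplified stand-in types rather than Squid's Slice; stopping at the splicing point keeps the suffix that a fresh edition still shares with the stale one.

    #include <vector>

    struct Slice {
        int next = -1; // ID of the next entry slice, or -1 at the end of the chain
        int size = 0;  // slice contents size
        void clear() { next = -1; size = 0; }
    };

    // Walk the singly linked chain, clearing each slice, but stop once the
    // splicing point has been cleared so the suffix after it stays intact.
    void freeChainAt(std::vector<Slice> &slices, int sliceId, const int splicingPoint)
    {
        while (sliceId >= 0) {
            Slice &slice = slices[sliceId];
            const int nextId = slice.next; // remember before clearing
            slice.clear();
            if (sliceId == splicingPoint)
                break; // preserve the chain suffix after the splicing point
            sliceId = nextId;
        }
    }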
#define debugs(SECTION, LEVEL, CONTENT)
const A & min(A const &lhs, A const &rhs)
sfileno fileNoByKey(const cache_key *const key) const
computes map entry anchor position for a given entry key
aggregates anchor and slice owners for Init() caller convenience
void startAppending()
writer keeps its lock but also allows reading
std::function< bool(const sfileno name)> NameFilter
void switchWritingToReading(const sfileno fileno)
stop writing (or updating) the locked entry and start reading it
Slice & writeableSlice(const AnchorId anchorId, const SliceId sliceId)
writeable slice within an entry chain created by openForWriting()
Controller & Root()
safely access controller singleton