/* RockRebuild.cc — reconstructed from a Doxygen source listing */
/*
 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 79 Disk IO Routines */
11 #include "squid.h"
12 #include "base/AsyncJobCalls.h"
13 #include "fs/rock/RockDbCell.h"
14 #include "fs/rock/RockRebuild.h"
15 #include "fs/rock/RockSwapDir.h"
16 #include "fs_io.h"
17 #include "globals.h"
18 #include "ipc/StoreMap.h"
19 #include "md5.h"
20 #include "SquidTime.h"
21 #include "Store.h"
22 #include "store_rebuild.h"
23 #include "tools.h"
24 
25 #include <cerrno>
26 
27 CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild);
28 
74 namespace Rock
75 {
76 
79 {
80 public:
81  LoadingFlags(): state(0), anchored(0), mapped(0), finalized(0), freed(0) {}
82 
83  /* for LoadingEntry */
84  uint8_t state:3;
85  uint8_t anchored:1;
86 
87  /* for LoadingSlot */
88  uint8_t mapped:1;
89  uint8_t finalized:1;
90  uint8_t freed:1;
91 };
92 
95 {
96 public:
97  LoadingEntry(const sfileno fileNo, LoadingParts &source);
98 
99  uint64_t &size;
100  uint32_t &version;
101 
104 
105  /* LoadingFlags::state */
106  State state() const { return static_cast<State>(flags.state); }
107  void state(State aState) const { flags.state = aState; }
108 
109  /* LoadingFlags::anchored */
110  bool anchored() const { return flags.anchored; }
111  void anchored(const bool beAnchored) { flags.anchored = beAnchored; }
112 
113 private:
115 };
116 
119 {
120 public:
121  LoadingSlot(const SlotId slotId, LoadingParts &source);
122 
125 
126  /* LoadingFlags::mapped */
127  bool mapped() const { return flags.mapped; }
128  void mapped(const bool beMapped) { flags.mapped = beMapped; }
129 
130  /* LoadingFlags::finalized */
131  bool finalized() const { return flags.finalized; }
132  void finalized(const bool beFinalized) { flags.finalized = beFinalized; }
133 
134  /* LoadingFlags::freed */
135  bool freed() const { return flags.freed; }
136  void freed(const bool beFreed) { flags.freed = beFreed; }
137 
138  bool used() const { return freed() || mapped() || more != -1; }
139 
140 private:
142 };
143 
147 {
148 public:
149  LoadingParts(int dbSlotLimit, int dbEntryLimit);
150  LoadingParts(LoadingParts&&) = delete; // paranoid (often too huge to copy)
151 
152 private:
153  friend class LoadingEntry;
154  friend class LoadingSlot;
155 
156  /* Anti-padding storage. With millions of entries, padding matters! */
157 
158  /* indexed by sfileno */
159  std::vector<uint64_t> sizes;
160  std::vector<uint32_t> versions;
161 
162  /* indexed by SlotId */
163  std::vector<Ipc::StoreMapSliceId> mores;
164 
165  /* entry flags are indexed by sfileno; slot flags -- by SlotId */
166  std::vector<LoadingFlags> flags;
167 };
168 
169 } /* namespace Rock */
170 
171 /* LoadingEntry */
172 
174  size(source.sizes.at(fileNo)),
175  version(source.versions.at(fileNo)),
176  flags(source.flags.at(fileNo))
177 {
178 }
179 
180 /* LoadingSlot */
181 
183  more(source.mores.at(slotId)),
184  flags(source.flags.at(slotId))
185 {
186 }
187 
188 /* LoadingParts */
189 
190 Rock::LoadingParts::LoadingParts(const int dbEntryLimit, const int dbSlotLimit):
191  sizes(dbEntryLimit, 0),
192  versions(dbEntryLimit, 0),
193  mores(dbSlotLimit, -1),
194  flags(dbSlotLimit)
195 {
196  assert(sizes.size() == versions.size()); // every entry has both fields
197  assert(sizes.size() <= mores.size()); // every entry needs slot(s)
198  assert(mores.size() == flags.size()); // every slot needs a set of flags
199 }
200 
201 /* Rebuild */
202 
203 Rock::Rebuild::Rebuild(SwapDir *dir): AsyncJob("Rock::Rebuild"),
204  sd(dir),
205  parts(nullptr),
206  dbSize(0),
207  dbSlotSize(0),
208  dbSlotLimit(0),
209  dbEntryLimit(0),
210  fd(-1),
211  dbOffset(0),
212  loadingPos(0),
213  validationPos(0)
214 {
215  assert(sd);
216  memset(&counts, 0, sizeof(counts));
217  dbSize = sd->diskOffsetLimit(); // we do not care about the trailer waste
222 }
223 
225 {
226  if (fd >= 0)
227  file_close(fd);
228  delete parts;
229 }
230 
232 void
234 {
235  // in SMP mode, only the disker is responsible for populating the map
236  if (UsingSmp() && !IamDiskProcess()) {
237  debugs(47, 2, "Non-disker skips rebuilding of cache_dir #" <<
238  sd->index << " from " << sd->filePath);
239  mustStop("non-disker");
240  return;
241  }
242 
243  debugs(47, DBG_IMPORTANT, "Loading cache_dir #" << sd->index <<
244  " from " << sd->filePath);
245 
246  fd = file_open(sd->filePath, O_RDONLY | O_BINARY);
247  if (fd < 0)
248  failure("cannot open db", errno);
249 
250  char hdrBuf[SwapDir::HeaderSize];
251  if (read(fd, hdrBuf, sizeof(hdrBuf)) != SwapDir::HeaderSize)
252  failure("cannot read db header", errno);
253 
254  // slot prefix of SM_PAGE_SIZE should fit both core entry header and ours
255  assert(sizeof(DbCellHeader) < SM_PAGE_SIZE);
257 
258  dbOffset = SwapDir::HeaderSize;
259 
260  parts = new LoadingParts(dbEntryLimit, dbSlotLimit);
261 
262  checkpoint();
263 }
264 
266 void
268 {
269  if (!done())
270  eventAdd("Rock::Rebuild", Rock::Rebuild::Steps, this, 0.01, 1, true);
271 }
272 
273 bool
275 {
276  return loadingPos >= dbSlotLimit;
277 }
278 
279 bool
281 {
282  // paranoid slot checking is only enabled with squid -S
283  return validationPos >= dbEntryLimit +
284  (opt_store_doublecheck ? dbSlotLimit : 0);
285 }
286 
287 bool
289 {
290  return doneLoading() && doneValidating() && AsyncJob::doneAll();
291 }
292 
293 void
295 {
296  // use async call to enable job call protection that time events lack
297  CallJobHere(47, 5, static_cast<Rebuild*>(data), Rock::Rebuild, steps);
298 }
299 
300 void
302 {
303  if (!doneLoading())
304  loadingSteps();
305  else
306  validationSteps();
307 
308  checkpoint();
309 }
310 
311 void
313 {
314  debugs(47,5, sd->index << " slot " << loadingPos << " at " <<
315  dbOffset << " <= " << dbSize);
316 
317  // Balance our desire to maximize the number of entries processed at once
318  // (and, hence, minimize overheads and total rebuild time) with a
319  // requirement to also process Coordinator events, disk I/Os, etc.
320  const int maxSpentMsec = 50; // keep small: most RAM I/Os are under 1ms
321  const timeval loopStart = current_time;
322 
323  int loaded = 0;
324  while (!doneLoading()) {
325  loadOneSlot();
326  dbOffset += dbSlotSize;
327  ++loadingPos;
328  ++loaded;
329 
330  if (counts.scancount % 1000 == 0)
331  storeRebuildProgress(sd->index, dbSlotLimit, counts.scancount);
332 
334  continue; // skip "few entries at a time" check below
335 
336  getCurrentTime();
337  const double elapsedMsec = tvSubMsec(loopStart, current_time);
338  if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
339  debugs(47, 5, HERE << "pausing after " << loaded << " entries in " <<
340  elapsedMsec << "ms; " << (elapsedMsec/loaded) << "ms per entry");
341  break;
342  }
343  }
344 }
345 
348 {
349  Must(0 <= fileNo && fileNo < dbEntryLimit);
350  return LoadingEntry(fileNo, *parts);
351 }
352 
355 {
356  Must(0 <= slotId && slotId < dbSlotLimit);
357  Must(slotId <= loadingPos); // cannot look ahead
358  return LoadingSlot(slotId, *parts);
359 }
360 
361 void
363 {
364  debugs(47,5, sd->index << " slot " << loadingPos << " at " <<
365  dbOffset << " <= " << dbSize);
366 
367  ++counts.scancount;
368 
369  if (lseek(fd, dbOffset, SEEK_SET) < 0)
370  failure("cannot seek to db entry", errno);
371 
372  buf.reset();
373 
374  if (!storeRebuildLoadEntry(fd, sd->index, buf, counts))
375  return;
376 
377  const SlotId slotId = loadingPos;
378 
379  // get our header
380  DbCellHeader header;
381  if (buf.contentSize() < static_cast<mb_size_t>(sizeof(header))) {
382  debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
383  "Ignoring truncated " << buf.contentSize() << "-byte " <<
384  "cache entry meta data at " << dbOffset);
385  freeUnusedSlot(slotId, true);
386  return;
387  }
388  memcpy(&header, buf.content(), sizeof(header));
389  if (header.empty()) {
390  freeUnusedSlot(slotId, false);
391  return;
392  }
393  if (!header.sane(dbSlotSize, dbSlotLimit)) {
394  debugs(47, DBG_IMPORTANT, "WARNING: cache_dir[" << sd->index << "]: " <<
395  "Ignoring malformed cache entry meta data at " << dbOffset);
396  freeUnusedSlot(slotId, true);
397  return;
398  }
399  buf.consume(sizeof(header)); // optimize to avoid memmove()
400 
401  useNewSlot(slotId, header);
402 }
403 
405 bool
407 {
409  StoreEntry loadedE;
410  const uint64_t knownSize = header.entrySize > 0 ?
411  header.entrySize : anchor.basics.swap_file_sz.load();
412  if (!storeRebuildParseEntry(buf, loadedE, key, counts, knownSize))
413  return false;
414 
415  // the entry size may be unknown, but if it is known, it is authoritative
416 
417  debugs(47, 8, "importing basics for entry " << fileno <<
418  " inode.entrySize: " << header.entrySize <<
419  " swap_file_sz: " << loadedE.swap_file_sz);
420  anchor.set(loadedE);
421 
422  // we have not validated whether all db cells for this entry were loaded
424 
425  // loadedE->dump(5);
426 
427  return true;
428 }
429 
430 void
432 {
433  debugs(47, 5, sd->index << " validating from " << validationPos);
434 
435  // see loadingSteps() for the rationale; TODO: avoid duplication
436  const int maxSpentMsec = 50; // keep small: validation does not do I/O
437  const timeval loopStart = current_time;
438 
439  int validated = 0;
440  while (!doneValidating()) {
441  if (validationPos < dbEntryLimit)
442  validateOneEntry(validationPos);
443  else
444  validateOneSlot(validationPos - dbEntryLimit);
445  ++validationPos;
446  ++validated;
447 
448  if (validationPos % 1000 == 0)
449  debugs(20, 2, "validated: " << validationPos);
450 
452  continue; // skip "few entries at a time" check below
453 
454  getCurrentTime();
455  const double elapsedMsec = tvSubMsec(loopStart, current_time);
456  if (elapsedMsec > maxSpentMsec || elapsedMsec < 0) {
457  debugs(47, 5, "pausing after " << validated << " entries in " <<
458  elapsedMsec << "ms; " << (elapsedMsec/validated) << "ms per entry");
459  break;
460  }
461  }
462 }
463 
466 void
468 {
469  // walk all map-linked slots, starting from inode, and mark each
470  Ipc::StoreMapAnchor &anchor = sd->map->writeableEntry(fileNo);
471  Must(le.size > 0); // paranoid
472  uint64_t mappedSize = 0;
473  SlotId slotId = anchor.start;
474  while (slotId >= 0 && mappedSize < le.size) {
475  LoadingSlot slot = loadingSlot(slotId); // throws if we have not loaded that slot
476  Must(!slot.finalized()); // no loops or stealing from other entries
477  Must(slot.mapped()); // all our slots should be in the sd->map
478  Must(!slot.freed()); // all our slots should still be present
479  slot.finalized(true);
480 
481  Ipc::StoreMapSlice &mapSlice = sd->map->writeableSlice(fileNo, slotId);
482  Must(mapSlice.size > 0); // paranoid
483  mappedSize += mapSlice.size;
484  slotId = mapSlice.next;
485  }
486  /* no hodgepodge entries: one entry - one full chain and no leftovers */
487  Must(slotId < 0);
488  Must(mappedSize == le.size);
489 
490  if (!anchor.basics.swap_file_sz)
491  anchor.basics.swap_file_sz = le.size;
494  sd->map->closeForWriting(fileNo);
495  ++counts.objcount;
496 }
497 
500 void
502 {
503  try {
504  finalizeOrThrow(fileNo, le);
505  } catch (const std::exception &ex) {
506  freeBadEntry(fileNo, ex.what());
507  }
508 }
509 
510 void
512 {
513  LoadingEntry entry = loadingEntry(fileNo);
514  switch (entry.state()) {
515 
517  finalizeOrFree(fileNo, entry);
518  break;
519 
520  case LoadingEntry::leEmpty: // no entry hashed to this position
521  case LoadingEntry::leLoaded: // we have already unlocked this entry
522  case LoadingEntry::leCorrupted: // we have already removed this entry
523  case LoadingEntry::leIgnored: // we have already discarded this entry
524  break;
525  }
526 }
527 
528 void
530 {
531  const LoadingSlot slot = loadingSlot(slotId);
532  // there should not be any unprocessed slots left
533  Must(slot.freed() || (slot.mapped() && slot.finalized()));
534 }
535 
538 void
539 Rock::Rebuild::freeBadEntry(const sfileno fileno, const char *eDescription)
540 {
541  debugs(47, 2, "cache_dir #" << sd->index << ' ' << eDescription <<
542  " entry " << fileno << " is ignored during rebuild");
543 
544  LoadingEntry le = loadingEntry(fileno);
546 
547  Ipc::StoreMapAnchor &anchor = sd->map->writeableEntry(fileno);
548  assert(anchor.start < 0 || le.size > 0);
549  for (SlotId slotId = anchor.start; slotId >= 0;) {
550  const SlotId next = loadingSlot(slotId).more;
551  freeSlot(slotId, true);
552  slotId = next;
553  }
554 
555  sd->map->forgetWritingEntry(fileno);
556 }
557 
558 void
560 {
561  debugs(47,3, HERE << "cache_dir #" << sd->index << " rebuild level: " <<
565 }
566 
567 void
568 Rock::Rebuild::failure(const char *msg, int errNo)
569 {
570  debugs(47,5, sd->index << " slot " << loadingPos << " at " <<
571  dbOffset << " <= " << dbSize);
572 
573  if (errNo)
574  debugs(47, DBG_CRITICAL, "ERROR: Rock cache_dir rebuild failure: " << xstrerr(errNo));
575  debugs(47, DBG_CRITICAL, "Do you need to run 'squid -z' to initialize storage?");
576 
577  assert(sd);
578  fatalf("Rock cache_dir[%d] rebuild of %s failed: %s.",
579  sd->index, sd->filePath, msg);
580 }
581 
583 void
584 Rock::Rebuild::freeSlot(const SlotId slotId, const bool invalid)
585 {
586  debugs(47,5, sd->index << " frees slot " << slotId);
587  LoadingSlot slot = loadingSlot(slotId);
588  assert(!slot.freed());
589  slot.freed(true);
590 
591  if (invalid) {
592  ++counts.invalid;
593  //sd->unlink(fileno); leave garbage on disk, it should not hurt
594  }
595 
596  Ipc::Mem::PageId pageId;
597  pageId.pool = sd->index+1;
598  pageId.number = slotId+1;
599  sd->freeSlots->push(pageId);
600 }
601 
603 void
604 Rock::Rebuild::freeUnusedSlot(const SlotId slotId, const bool invalid)
605 {
606  LoadingSlot slot = loadingSlot(slotId);
607  // mapped slots must be freed via freeBadEntry() to keep the map in sync
608  assert(!slot.mapped());
609  freeSlot(slotId, invalid);
610 }
611 
613 void
614 Rock::Rebuild::mapSlot(const SlotId slotId, const DbCellHeader &header)
615 {
616  LoadingSlot slot = loadingSlot(slotId);
617  assert(!slot.mapped());
618  assert(!slot.freed());
619  slot.mapped(true);
620 
621  Ipc::StoreMapSlice slice;
622  slice.next = header.nextSlot;
623  slice.size = header.payloadSize;
624  sd->map->importSlice(slotId, slice);
625 }
626 
627 template <class SlotIdType> // accommodates atomic and simple SlotIds.
628 void
629 Rock::Rebuild::chainSlots(SlotIdType &from, const SlotId to)
630 {
631  LoadingSlot slot = loadingSlot(to);
632  assert(slot.more < 0);
633  slot.more = from; // may still be unset
634  from = to;
635 }
636 
639 void
640 Rock::Rebuild::addSlotToEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
641 {
642  LoadingEntry le = loadingEntry(fileno);
643  Ipc::StoreMapAnchor &anchor = sd->map->writeableEntry(fileno);
644 
645  debugs(47,9, "adding " << slotId << " to entry " << fileno);
646  // we do not need to preserve the order
647  if (le.anchored()) {
648  LoadingSlot inode = loadingSlot(anchor.start);
649  chainSlots(inode.more, slotId);
650  } else {
651  chainSlots(anchor.start, slotId);
652  }
653 
654  le.size += header.payloadSize; // must precede freeBadEntry() calls
655 
656  if (header.firstSlot == slotId) {
657  debugs(47,5, "added inode");
658 
659  if (le.anchored()) { // we have already added another inode slot
660  freeBadEntry(fileno, "inode conflict");
661  ++counts.clashcount;
662  return;
663  }
664 
665  le.anchored(true);
666 
667  if (!importEntry(anchor, fileno, header)) {
668  freeBadEntry(fileno, "corrupted metainfo");
669  return;
670  }
671 
672  // set total entry size and/or check it for consistency
673  if (const uint64_t totalSize = header.entrySize) {
674  assert(totalSize != static_cast<uint64_t>(-1));
675  if (!anchor.basics.swap_file_sz) {
676  anchor.basics.swap_file_sz = totalSize;
677  assert(anchor.basics.swap_file_sz != static_cast<uint64_t>(-1));
678  } else if (totalSize != anchor.basics.swap_file_sz) {
679  freeBadEntry(fileno, "size mismatch");
680  return;
681  }
682  }
683  }
684 
685  const uint64_t totalSize = anchor.basics.swap_file_sz; // may be 0/unknown
686 
687  if (totalSize > 0 && le.size > totalSize) { // overflow
688  debugs(47, 8, "overflow: " << le.size << " > " << totalSize);
689  freeBadEntry(fileno, "overflowing");
690  return;
691  }
692 
693  mapSlot(slotId, header);
694  if (totalSize > 0 && le.size == totalSize)
695  finalizeOrFree(fileno, le); // entry is probably fully loaded now
696 }
697 
699 void
701 {
702  anchor.setKey(reinterpret_cast<const cache_key*>(header.key));
703  assert(header.firstSlot >= 0);
704  anchor.start = -1; // addSlotToEntry() will set it
705 
706  assert(anchor.basics.swap_file_sz != static_cast<uint64_t>(-1));
707 
708  LoadingEntry le = loadingEntry(fileno);
710  le.version = header.version;
711  le.size = 0;
712 }
713 
715 void
716 Rock::Rebuild::startNewEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
717 {
718  // A miss may have been stored at our fileno while we were loading other
719  // slots from disk. We ought to preserve that entry because it is fresher.
720  const bool overwriteExisting = false;
721  if (Ipc::StoreMap::Anchor *anchor = sd->map->openForWritingAt(fileno, overwriteExisting)) {
722  primeNewEntry(*anchor, fileno, header);
723  addSlotToEntry(fileno, slotId, header); // may fail
724  assert(anchor->basics.swap_file_sz != static_cast<uint64_t>(-1));
725  } else {
726  // A new from-network entry is occupying our map slot; let it be, but
727  // save us from the trouble of going through the above motions again.
728  LoadingEntry le = loadingEntry(fileno);
730  freeUnusedSlot(slotId, false);
731  }
732 }
733 
735 bool
736 Rock::Rebuild::sameEntry(const sfileno fileno, const DbCellHeader &header) const
737 {
738  // Header updates always result in multi-start chains and often
739  // result in multi-version chains so we can only compare the keys.
740  const Ipc::StoreMap::Anchor &anchor = sd->map->writeableEntry(fileno);
741  return anchor.sameKey(reinterpret_cast<const cache_key*>(header.key));
742 }
743 
745 void
746 Rock::Rebuild::useNewSlot(const SlotId slotId, const DbCellHeader &header)
747 {
748  const cache_key *const key =
749  reinterpret_cast<const cache_key*>(header.key);
750  const sfileno fileno = sd->map->fileNoByKey(key);
751  assert(0 <= fileno && fileno < dbEntryLimit);
752 
753  LoadingEntry le = loadingEntry(fileno);
754  debugs(47,9, "entry " << fileno << " state: " << le.state() << ", inode: " <<
755  header.firstSlot << ", size: " << header.payloadSize);
756 
757  switch (le.state()) {
758 
759  case LoadingEntry::leEmpty: {
760  startNewEntry(fileno, slotId, header);
761  break;
762  }
763 
765  if (sameEntry(fileno, header)) {
766  addSlotToEntry(fileno, slotId, header); // may fail
767  } else {
768  // either the loading chain or this slot is stale;
769  // be conservative and ignore both (and any future ones)
770  freeBadEntry(fileno, "duplicated");
771  freeUnusedSlot(slotId, true);
772  ++counts.dupcount;
773  }
774  break;
775  }
776 
777  case LoadingEntry::leLoaded: {
778  // either the previously loaded chain or this slot is stale;
779  // be conservative and ignore both (and any future ones)
781  sd->map->freeEntry(fileno); // may not be immediately successful
782  freeUnusedSlot(slotId, true);
783  ++counts.dupcount;
784  break;
785  }
786 
788  // previously seen slots messed things up so we must ignore this one
789  freeUnusedSlot(slotId, true);
790  break;
791  }
792 
794  // already replaced by a fresher or colliding from-network entry
795  freeUnusedSlot(slotId, false);
796  break;
797  }
798  }
799 }
800 
void addSlotToEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
Definition: RockRebuild.cc:640
uint8_t anchored
whether we loaded the inode slot for this entry
Definition: RockRebuild.cc:85
SwapDir * sd
Definition: RockRebuild.h:75
virtual bool doneAll() const
whether positive goal has been reached
Definition: AsyncJob.cc:96
void finalized(const bool beFinalized)
Definition: RockRebuild.cc:132
bool doneValidating() const
Definition: RockRebuild.cc:280
void mapped(const bool beMapped)
Definition: RockRebuild.cc:128
static int store_dirs_rebuilding
the number of cache_dirs being rebuilt; TODO: move to Disks::Rebuilding
Definition: Controller.h:134
std::atomic< uint64_t > swap_file_sz
Definition: StoreMap.h:93
virtual void swanSong() override
Definition: RockRebuild.cc:559
#define assert(EX)
Definition: assert.h:17
void finalizeOrThrow(const sfileno fileNo, LoadingEntry &le)
Definition: RockRebuild.cc:467
void useNewSlot(const SlotId slotId, const DbCellHeader &header)
handle freshly loaded (and validated) db slot header
Definition: RockRebuild.cc:746
struct Ipc::StoreMapAnchor::Basics basics
void finalizeOrFree(const sfileno fileNo, LoadingEntry &le)
Definition: RockRebuild.cc:501
void loadOneSlot()
Definition: RockRebuild.cc:362
std::vector< Ipc::StoreMapSliceId > mores
LoadingSlot::more for all slots.
Definition: RockRebuild.cc:163
#define CallJobHere(debugSection, debugLevel, job, Class, method)
Definition: AsyncJobCalls.h:57
std::atomic< StoreMapSliceId > next
ID of the next entry slice.
Definition: StoreMap.h:46
smart StoreEntry-level info pointer (hides anti-padding LoadingParts arrays)
Definition: RockRebuild.cc:94
void storeRebuildComplete(StoreRebuildData *dc)
bool storeRebuildParseEntry(MemBuf &buf, StoreEntry &tmpe, cache_key *key, StoreRebuildData &stats, uint64_t expectedSize)
parses entry buffer and validates entry metadata; fills e on success
virtual ~Rebuild() override
Definition: RockRebuild.cc:224
std::atomic< Size > size
slice contents size
Definition: StoreMap.h:45
unsigned char cache_key
Store key.
Definition: forward.h:29
State
possible store entry states during index rebuild
Definition: RockRebuild.cc:103
bool freed() const
Definition: RockRebuild.cc:135
void state(State aState) const
Definition: RockRebuild.cc:107
static int version
uint64_t slotSize
all db slots are of this size
Definition: RockSwapDir.h:79
uint64_t entrySize
total entry content size or zero if still unknown
Definition: RockDbCell.h:42
int file_open(const char *path, int mode)
Definition: fs_io.cc:46
void checkpoint()
continues after a pause if not done
Definition: RockRebuild.cc:267
sfileno SlotId
db cell number, starting with cell 0 (always occupied by the db header)
Definition: forward.h:30
#define Must(condition)
Like assert() but throws an exception instead of aborting the process.
Definition: TextException.h:69
void loadingSteps()
Definition: RockRebuild.cc:312
#define DBG_CRITICAL
Definition: Debug.h:45
sfileno nextSlot
slot ID of the next slot occupied by the entry
Definition: RockDbCell.h:46
uint32_t pool
page pool ID within Squid
Definition: Page.h:33
struct timeval current_time
Definition: stub_time.cc:15
int opt_foreground_rebuild
LoadingEntry loadingEntry(const sfileno fileNo)
Definition: RockRebuild.cc:347
bool importEntry(Ipc::StoreMapAnchor &anchor, const sfileno slotId, const DbCellHeader &header)
parse StoreEntry basics and add them to the map, returning true on success
Definition: RockRebuild.cc:406
void primeNewEntry(Ipc::StoreMapAnchor &anchor, const sfileno fileno, const DbCellHeader &header)
initialize housekeeping information for a newly accepted entry
Definition: RockRebuild.cc:700
void fatalf(const char *fmt,...)
Definition: fatal.cc:68
uint8_t freed
whether the slot was given to the map as free space
Definition: RockRebuild.cc:90
uint8_t mapped
whether the slot was added to a mapped entry
Definition: RockRebuild.cc:88
uint64_t & size
payload seen so far
Definition: RockRebuild.cc:99
low-level anti-padding storage class for LoadingEntry and LoadingSlot flags
Definition: RockRebuild.cc:78
bool anchored() const
Definition: RockRebuild.cc:110
Rebuild(SwapDir *dir)
Definition: RockRebuild.cc:203
bool sameKey(const cache_key *const aKey) const
Definition: StoreMap.cc:772
smart db slot-level info pointer (hides anti-padding LoadingParts arrays)
Definition: RockRebuild.cc:118
uint64_t swap_file_sz
Definition: Store.h:204
int tvSubMsec(struct timeval, struct timeval)
Definition: stub_time.cc:20
uint32_t number
page number within the segment
Definition: Page.h:35
const char * xstrerr(int error)
Definition: xstrerror.cc:83
LoadingFlags & flags
entry flags (see the above accessors) are ours
Definition: RockRebuild.cc:114
void const char HLPCB void * data
Definition: stub_helper.cc:16
int64_t dbSize
Definition: RockRebuild.h:78
bool sane(const size_t slotSize, int slotLimit) const
whether this slot is not corrupted
Definition: RockDbCell.h:33
#define debugs(SECTION, LEVEL, CONTENT)
Definition: Debug.h:124
#define DBG_IMPORTANT
Definition: Debug.h:46
std::vector< uint64_t > sizes
LoadingEntry::size for all entries.
Definition: RockRebuild.cc:159
uint32_t payloadSize
slot contents size, always positive
Definition: RockDbCell.h:43
void anchored(const bool beAnchored)
Definition: RockRebuild.cc:111
static StoreRebuildData counts
#define SQUID_MD5_DIGEST_LENGTH
Definition: md5.h:66
#define EBIT_CLR(flag, bit)
Definition: defines.h:106
LoadingParts(int dbSlotLimit, int dbEntryLimit)
Definition: RockRebuild.cc:190
static const int64_t HeaderSize
on-disk db header size
Definition: RockSwapDir.h:148
bool doneLoading() const
Definition: RockRebuild.cc:274
int dbSlotSize
the size of a db cell, including the cell header
Definition: RockRebuild.h:79
bool sameEntry(const sfileno fileno, const DbCellHeader &header) const
does the header belong to the fileno entry being loaded?
Definition: RockRebuild.cc:736
static void Steps(void *data)
Definition: RockRebuild.cc:294
bool used() const
Definition: RockRebuild.cc:138
int32_t StoreMapSliceId
Definition: StoreMap.h:24
uint64_t key[2]
StoreEntry key.
Definition: RockDbCell.h:41
int64_t diskOffsetLimit() const
Definition: RockSwapDir.cc:707
void freeBadEntry(const sfileno fileno, const char *eDescription)
Definition: RockRebuild.cc:539
uint8_t state
current entry state (one of the LoadingEntry::State values)
Definition: RockRebuild.cc:84
bool empty() const
true iff no entry occupies this slot
Definition: RockDbCell.h:28
signed_int32_t sfileno
Definition: forward.h:22
std::vector< uint32_t > versions
LoadingEntry::version for all entries.
Definition: RockRebuild.cc:160
int opt_store_doublecheck
#define SM_PAGE_SIZE
Definition: defines.h:102
uint32_t & version
DbCellHeader::version to distinguish same-URL chains.
Definition: RockRebuild.cc:100
bool UsingSmp()
Whether there should be more than one worker process running.
Definition: tools.cc:658
virtual bool doneAll() const override
whether positive goal has been reached
Definition: RockRebuild.cc:288
void const char * buf
Definition: stub_helper.cc:16
std::ostream & HERE(std::ostream &s)
Definition: Debug.h:153
Ipc::StoreMapSliceId & more
another slot in some chain belonging to the same entry (unordered!)
Definition: RockRebuild.cc:124
void startNewEntry(const sfileno fileno, const SlotId slotId, const DbCellHeader &header)
handle a slot from an entry that we have not seen before
Definition: RockRebuild.cc:716
time_t getCurrentTime(void)
Get current time.
void eventAdd(const char *name, EVH *func, void *arg, double when, int weight, bool cbdata)
Definition: event.cc:109
void validateOneEntry(const sfileno fileNo)
Definition: RockRebuild.cc:511
void freed(const bool beFreed)
Definition: RockRebuild.cc:136
void chainSlots(SlotIdType &from, const SlotId to)
Definition: RockRebuild.cc:629
#define EBIT_SET(flag, bit)
Definition: defines.h:105
LoadingSlot(const SlotId slotId, LoadingParts &source)
Definition: RockRebuild.cc:182
int dbSlotLimit
total number of db cells
Definition: RockRebuild.h:80
int64_t entryLimitActual() const
max number of possible entries in db
Definition: RockSwapDir.cc:206
void validateOneSlot(const SlotId slotId)
Definition: RockRebuild.cc:529
int dbEntryLimit
maximum number of entries that can be stored in db
Definition: RockRebuild.h:81
int64_t slotLimitActual() const
total number of slots in this db
Definition: RockSwapDir.cc:197
ssize_t mb_size_t
Definition: MemBuf.h:17
Shared memory page identifier, address, or handler.
Definition: Page.h:21
virtual void start() override
prepares and initiates entry loading sequence
Definition: RockRebuild.cc:233
LoadingSlot loadingSlot(const SlotId slotId)
Definition: RockRebuild.cc:354
void storeRebuildProgress(int sd_index, int total, int sofar)
void validationSteps()
Definition: RockRebuild.cc:431
void mapSlot(const SlotId slotId, const DbCellHeader &header)
adds slot to the entry chain in the map
Definition: RockRebuild.cc:614
void set(const StoreEntry &anEntry, const cache_key *aKey=nullptr)
store StoreEntry key and basics for an inode slot
Definition: StoreMap.cc:779
State state() const
Definition: RockRebuild.cc:106
uint8_t finalized
whether finalizeOrThrow() has scanned the slot
Definition: RockRebuild.cc:89
bool IamDiskProcess() STUB_RETVAL_NOP(false) bool InDaemonMode() STUB_RETVAL_NOP(false) bool UsingSmp() STUB_RETVAL_NOP(false) bool IamCoordinatorProcess() STUB_RETVAL(false) bool IamPrimaryProcess() STUB_RETVAL(false) int NumberOfKids() STUB_RETVAL(0) void setMaxFD(void) STUB void setSystemLimits(void) STUB void squid_signal(int
whether the current process is dedicated to managing a cache_dir
#define O_BINARY
Definition: defines.h:204
void failure(const char *msg, int errNo=0)
Definition: RockRebuild.cc:568
void freeSlot(const SlotId slotId, const bool invalid)
adds slot to the free slot index
Definition: RockRebuild.cc:584
LoadingFlags & flags
slot flags (see the above accessors) are ours
Definition: RockRebuild.cc:141
std::vector< LoadingFlags > flags
all LoadingEntry and LoadingSlot flags
Definition: RockRebuild.cc:166
void file_close(int fd)
Definition: fs_io.cc:76
StoreRebuildData counts
Definition: RockRebuild.h:89
std::atomic< StoreMapSliceId > start
where the chain of StoreEntry slices begins [app]
Definition: StoreMap.h:99
bool storeRebuildLoadEntry(int fd, int diskIndex, MemBuf &buf, StoreRebuildData &)
loads entry from disk; fills supplied memory buffer on success
uint32_t version
detects conflicts among same-key entries
Definition: RockDbCell.h:44
LoadingEntry(const sfileno fileNo, LoadingParts &source)
Definition: RockRebuild.cc:173
sfileno firstSlot
slot ID of the first slot occupied by the entry
Definition: RockDbCell.h:45
bool finalized() const
Definition: RockRebuild.cc:131
int size
Definition: ModDevPoll.cc:77
CBDATA_NAMESPACED_CLASS_INIT(Rock, Rebuild)
bool mapped() const
Definition: RockRebuild.cc:127
void freeUnusedSlot(const SlotId slotId, const bool invalid)
freeSlot() for never-been-mapped slots
Definition: RockRebuild.cc:604
void setKey(const cache_key *const aKey)
Definition: StoreMap.cc:765

 

Introduction

Documentation

Support

Miscellaneous

Web Site Translations

Mirrors