diff --git src/fs/rock/RockSwapDir.cc src/fs/rock/RockSwapDir.cc index 8b17ec3..7fe8944 100644 --- src/fs/rock/RockSwapDir.cc +++ src/fs/rock/RockSwapDir.cc @@ -339,62 +339,59 @@ Rock::SwapDir::parseTimeOption(char const *option, const char *value, int reconf return true; } /// reports time-specific options; mimics ::SwapDir::optionObjectSizeDump() void Rock::SwapDir::dumpTimeOption(StoreEntry * e) const { if (fileConfig.ioTimeout) storeAppendPrintf(e, " swap-timeout=%"PRId64, static_cast<int64_t>(fileConfig.ioTimeout)); } /// check the results of the configuration; only level-0 debugging works here void Rock::SwapDir::validateOptions() { if (max_objsize <= 0) fatal("Rock store requires a positive max-size"); -#if THIS_CODE_IS_FIXED_AND_MOVED - // XXX: should not use map as it is not yet created - // XXX: max_size is in Bytes now - // XXX: Use DBG_IMPORTANT (and DBG_CRITICAL if opt_parse_cfg_only?) - // TODO: Shrink max_size to avoid waste? - const int64_t mapRoundWasteMx = max_objsize*sizeof(long)*8; - const int64_t sizeRoundWasteMx = 1024; // max_size stored in KB - const int64_t roundingWasteMx = max(mapRoundWasteMx, sizeRoundWasteMx); - const int64_t totalWaste = maxSize() - diskOffsetLimit(); - assert(diskOffsetLimit() <= maxSize()); + const int64_t maxSizeRoundingWaste = 1024 * 1024; // size is configured in MB + const int64_t maxObjectSizeRoundingWaste = maxObjectSize(); + const int64_t maxRoundingWaste = + max(maxSizeRoundingWaste, maxObjectSizeRoundingWaste); + const int64_t usableDiskSize = diskOffset(entryLimitAllowed()); + const int64_t diskWasteSize = maxSize() - usableDiskSize; + Must(diskWasteSize >= 0); // warn if maximum db size is not reachable due to sfileno limit - if (map->entryLimit() == entryLimitHigh() && totalWaste > roundingWasteMx) { - debugs(47, 0, "Rock store cache_dir[" << index << "]:"); - debugs(47, 0, "\tmaximum number of entries: " << map->entryLimit()); - debugs(47, 0, "\tmaximum entry size: " << max_objsize << " bytes"); - debugs(47, 0, 
"\tmaximum db size: " << maxSize() << " bytes"); - debugs(47, 0, "\tusable db size: " << diskOffsetLimit() << " bytes"); - debugs(47, 0, "\tdisk space waste: " << totalWaste << " bytes"); - debugs(47, 0, "WARNING: Rock store config wastes space."); + if (entryLimitAllowed() == entryLimitHigh() && + diskWasteSize >= maxRoundingWaste) { + debugs(47, DBG_CRITICAL, "Rock store cache_dir[" << index << "] '" << path << "':"); + debugs(47, DBG_CRITICAL, "\tmaximum number of entries: " << entryLimitAllowed()); + debugs(47, DBG_CRITICAL, "\tmaximum object size: " << maxObjectSize() << " Bytes"); + debugs(47, DBG_CRITICAL, "\tmaximum db size: " << maxSize() << " Bytes"); + debugs(47, DBG_CRITICAL, "\tusable db size: " << usableDiskSize << " Bytes"); + debugs(47, DBG_CRITICAL, "\tdisk space waste: " << diskWasteSize << " Bytes"); + debugs(47, DBG_CRITICAL, "WARNING: Rock store config wastes space."); } -#endif } void Rock::SwapDir::rebuild() { //++StoreController::store_dirs_rebuilding; // see Rock::SwapDir::init() AsyncJob::Start(new Rebuild(this)); } /* Add a new object to the cache with empty memory copy and pointer to disk * use to rebuild store from disk. Based on UFSSwapDir::addDiskRestore */ bool Rock::SwapDir::addEntry(const int filen, const DbCellHeader &header, const StoreEntry &from) { debugs(47, 8, HERE << &from << ' ' << from.getMD5Text() << ", filen="<< std::setfill('0') << std::hex << std::uppercase << std::setw(8) << filen); sfileno newLocation = 0; if (Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast<const cache_key *>(from.key), newLocation)) {