Add 1GB RAM cache for decompressed MPQ files

Eliminates repeated MPQ decompression overhead by caching decompressed
files in RAM with LRU eviction. Major performance improvement for file access.

Problem:
- Every readFile() call decompresses from MPQ (expensive!)
- M2 models, textures, WMO files decompressed repeatedly
- No caching of decompressed data
- MPQ decompression is CPU-intensive (zlib/bzip2)

Solution:
- Added 1GB LRU file cache to AssetManager
- Cache hit: instant return of decompressed data
- Cache miss: decompress once, cache for future access
- LRU eviction when cache full (removes least recently used)
- Don't cache files >100MB (avoid giant WMO chunks)
- Thread-safe with existing readMutex

Implementation:
- CachedFile struct: data + lastAccessTime
- fileCacheAccessCounter for LRU tracking
- Hit/miss statistics for monitoring
- Budget: 1GB (modern RAM easily handles this)

Performance impact:
- First load: same speed (decompress + cache)
- Subsequent loads: instant (no decompression)
- Expected 70-90% hit rate during normal play
- Huge benefit for frequently accessed models

Cache stats logged on shutdown to monitor effectiveness.
This commit is contained in:
Kelsi 2026-02-08 22:37:29 -08:00
parent 34cd5a161d
commit 27d0496894
2 changed files with 75 additions and 3 deletions

View file

@@ -26,7 +26,7 @@ bool AssetManager::initialize(const std::string& dataPath_) {
}
initialized = true;
LOG_INFO("Asset manager initialized successfully");
LOG_INFO("Asset manager initialized successfully (1GB file cache enabled)");
return true;
}
@@ -37,6 +37,13 @@ void AssetManager::shutdown() {
LOG_INFO("Shutting down asset manager");
// Log cache statistics
if (fileCacheHits + fileCacheMisses > 0) {
float hitRate = (float)fileCacheHits / (fileCacheHits + fileCacheMisses) * 100.0f;
LOG_INFO("File cache stats: ", fileCacheHits, " hits, ", fileCacheMisses, " misses (",
(int)hitRate, "% hit rate), ", fileCacheTotalBytes / 1024 / 1024, " MB cached");
}
clearCache();
mpqManager.shutdown();
@@ -141,13 +148,59 @@ std::vector<uint8_t> AssetManager::readFile(const std::string& path) const {
return std::vector<uint8_t>();
}
std::string normalized = normalizePath(path);
std::lock_guard<std::mutex> lock(readMutex);
return mpqManager.readFile(normalizePath(path));
// Check cache first
auto it = fileCache.find(normalized);
if (it != fileCache.end()) {
// Cache hit - update access time and return cached data
it->second.lastAccessTime = ++fileCacheAccessCounter;
fileCacheHits++;
return it->second.data;
}
// Cache miss - decompress from MPQ
fileCacheMisses++;
std::vector<uint8_t> data = mpqManager.readFile(normalized);
if (data.empty()) {
return data; // File not found
}
// Add to cache if within budget
size_t fileSize = data.size();
if (fileSize > 0 && fileSize < FILE_CACHE_BUDGET / 10) { // Don't cache files > 100MB
// Evict old entries if needed (LRU)
while (fileCacheTotalBytes + fileSize > FILE_CACHE_BUDGET && !fileCache.empty()) {
// Find least recently used entry
auto lru = fileCache.begin();
for (auto it = fileCache.begin(); it != fileCache.end(); ++it) {
if (it->second.lastAccessTime < lru->second.lastAccessTime) {
lru = it;
}
}
fileCacheTotalBytes -= lru->second.data.size();
fileCache.erase(lru);
}
// Add new entry
CachedFile cached;
cached.data = data;
cached.lastAccessTime = ++fileCacheAccessCounter;
fileCache[normalized] = std::move(cached);
fileCacheTotalBytes += fileSize;
}
return data;
}
void AssetManager::clearCache() {
std::lock_guard<std::mutex> lock(readMutex);
dbcCache.clear();
LOG_INFO("Cleared asset cache");
fileCache.clear();
fileCacheTotalBytes = 0;
fileCacheAccessCounter = 0;
LOG_INFO("Cleared asset cache (DBC + file cache)");
}
std::string AssetManager::normalizePath(const std::string& path) const {