From 9facea14a76fd2bdb6e03e3498aa6a6e63e89c28 Mon Sep 17 00:00:00 2001
From: Kelsi
Date: Wed, 6 May 2026 09:29:24 -0700
Subject: [PATCH] fix(dbc): reject absurd header values + use 64-bit size math
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

DBCFile::load multiplied recordCount * recordSize as uint32 (line 108), so a
header with recordCount=1B and recordSize=1024 would wrap to a tiny size — the
"file truncated" check then passes on a short hostile file, the record buffer
is allocated at the wrapped (tiny) size, and every later record access indexes
far past it. Reject impossible header values up front (10M records / 1024
fields / 4KB record / 256MB string block) and use uint64_t for the file-size
sanity check + size_t for the resize/memcpy product so the bounds-check is the
only path that allows large counts.
---
 src/pipeline/dbc_loader.cpp | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

diff --git a/src/pipeline/dbc_loader.cpp b/src/pipeline/dbc_loader.cpp
index 7b3fdc50..235856eb 100644
--- a/src/pipeline/dbc_loader.cpp
+++ b/src/pipeline/dbc_loader.cpp
@@ -68,8 +68,25 @@ bool DBCFile::load(const std::vector<uint8_t>& dbcData) {
     recordSize = header.recordSize;
     stringBlockSize = header.stringBlockSize;
 
-    // Validate sizes
-    uint32_t expectedSize = sizeof(DBCHeader) + (recordCount * recordSize) + stringBlockSize;
+    // Reject absurd header values up front. Real DBCs cap at ~1M records
+    // and 1024 fields; a large stringBlockSize is up to ~64MB. Multiplying
+    // these without bounds risks uint32 overflow in the size math below —
+    // the truncated-file check would pass and later record reads would
+    // index past the tiny buffer actually allocated.
+    if (recordCount > 10'000'000 || fieldCount > 1024 ||
+        recordSize > 1024 * 4 ||
+        stringBlockSize > 256u * 1024 * 1024) {
+        LOG_ERROR("DBC header rejected: recordCount=", recordCount,
+                  " fieldCount=", fieldCount, " recordSize=", recordSize,
+                  " stringBlockSize=", stringBlockSize);
+        return false;
+    }
+
+    // Validate sizes — use uint64 for the product so the overflow check
+    // above is the only path that allows a large recordCount * recordSize.
+    uint64_t expectedSize = sizeof(DBCHeader) +
+        static_cast<uint64_t>(recordCount) * recordSize +
+        stringBlockSize;
     if (dbcData.size() < expectedSize) {
         LOG_ERROR("DBC file truncated: expected ", expectedSize, " bytes, got ", dbcData.size());
         return false;
@@ -86,9 +103,10 @@ bool DBCFile::load(const std::vector<uint8_t>& dbcData) {
               fieldCount, " fields, ", recordSize, " bytes/record, ",
               stringBlockSize, " string bytes");
 
-    // Copy record data
+    // Copy record data. Use size_t for the product so it matches the
+    // header-validated 64-bit expectedSize math above.
     const uint8_t* recordStart = dbcData.data() + sizeof(DBCHeader);
-    uint32_t totalRecordSize = recordCount * recordSize;
+    size_t totalRecordSize = static_cast<size_t>(recordCount) * recordSize;
     recordData.resize(totalRecordSize);
    if (totalRecordSize > 0) {
         std::memcpy(recordData.data(), recordStart, totalRecordSize);