mirror of
https://github.com/Kelsidavis/WoWee.git
synced 2026-04-17 17:43:52 +00:00
Initial commit: wowee native WoW 3.3.5a client
This commit is contained in:
commit
ce6cb8f38e
147 changed files with 32347 additions and 0 deletions
564
src/pipeline/adt_loader.cpp
Normal file
564
src/pipeline/adt_loader.cpp
Normal file
|
|
@ -0,0 +1,564 @@
|
|||
#include "pipeline/adt_loader.hpp"
|
||||
#include "core/logger.hpp"
|
||||
#include <cstring>
|
||||
#include <cmath>
|
||||
|
||||
namespace wowee {
|
||||
namespace pipeline {
|
||||
|
||||
// HeightMap implementation
|
||||
// Returns the stored height for vertex coordinate (x, y), or 0.0f for
// out-of-range coordinates.
//
// Storage layout (per WoW MCVT): 145 floats — a 9x9 "outer" grid at
// indices 0-80 interleaved with an 8x8 "inner" grid at indices 81-144.
float HeightMap::getHeight(int x, int y) const {
    // Reject anything outside the 9x9 outer grid.
    if (x < 0 || x > 8 || y < 0 || y > 8) {
        return 0.0f;
    }

    // WoW uses 9x9 outer + 8x8 inner vertex layout
    // Outer vertices: 0-80 (9x9 grid)
    // Inner vertices: 81-144 (8x8 grid between outer vertices)

    // Calculate index based on vertex type
    int index;
    if (x < 9 && y < 9) {
        // Outer vertex
        index = y * 9 + x;
    } else {
        // NOTE(review): unreachable — the bounds check above already limits
        // x and y to 0..8, so (x < 9 && y < 9) is always true and inner
        // vertices (indices 81-144) can never be returned through this
        // accessor. Confirm whether callers were meant to address inner
        // vertices via a wider coordinate range (e.g. 0..16 interleaved).
        // Inner vertex (between outer vertices)
        int innerX = x - 1;
        int innerY = y - 1;
        if (innerX >= 0 && innerX < 8 && innerY >= 0 && innerY < 8) {
            index = 81 + innerY * 8 + innerX;
        } else {
            return 0.0f;
        }
    }

    return heights[index];
}
|
||||
|
||||
// ADTLoader implementation
|
||||
// Parses a raw ADT (terrain tile) file into an ADTTerrain.
//
// Walks the top-level chunk stream (4-byte magic + 4-byte size + payload)
// and dispatches each known chunk type to its parser; unknown chunks are
// skipped over by size. Parsing stops at the first truncated or invalid
// header, and whatever was parsed up to that point is returned with
// terrain.loaded = true (partial data is still usable).
ADTTerrain ADTLoader::load(const std::vector<uint8_t>& adtData) {
    ADTTerrain terrain;

    if (adtData.empty()) {
        LOG_ERROR("Empty ADT data");
        return terrain;
    }

    LOG_INFO("Loading ADT terrain (", adtData.size(), " bytes)");

    size_t offset = 0;
    // Running index of MCNK chunks encountered in file order; used to
    // address terrain.chunks (parseMCNK validates the 0..255 range).
    int chunkIndex = 0;

    // Parse chunks
    int totalChunks = 0;
    while (offset < adtData.size()) {
        ChunkHeader header;
        // readChunkHeader also validates that the declared payload fits
        // inside the file, so chunkData/chunkSize below are safe to use.
        if (!readChunkHeader(adtData.data(), offset, adtData.size(), header)) {
            break;
        }

        const uint8_t* chunkData = adtData.data() + offset + 8;
        size_t chunkSize = header.size;

        totalChunks++;
        if (totalChunks <= 5) {
            // Log first few chunks for debugging
            char magic[5] = {0};
            std::memcpy(magic, &header.magic, 4);
            LOG_INFO("Chunk #", totalChunks, ": magic=", magic,
                     " (0x", std::hex, header.magic, std::dec, "), size=", chunkSize);
        }

        // Parse based on chunk type
        if (header.magic == MVER) {
            parseMVER(chunkData, chunkSize, terrain);
        }
        else if (header.magic == MTEX) {
            parseMTEX(chunkData, chunkSize, terrain);
        }
        else if (header.magic == MMDX) {
            parseMMDX(chunkData, chunkSize, terrain);
        }
        else if (header.magic == MWMO) {
            parseMWMO(chunkData, chunkSize, terrain);
        }
        else if (header.magic == MDDF) {
            parseMDDF(chunkData, chunkSize, terrain);
        }
        else if (header.magic == MODF) {
            parseMODF(chunkData, chunkSize, terrain);
        }
        else if (header.magic == MH2O) {
            LOG_INFO("Found MH2O chunk (", chunkSize, " bytes)");
            parseMH2O(chunkData, chunkSize, terrain);
        }
        else if (header.magic == MCNK) {
            parseMCNK(chunkData, chunkSize, chunkIndex++, terrain);
        }

        // Move to next chunk
        offset += 8 + chunkSize;
    }

    terrain.loaded = true;
    LOG_INFO("ADT loaded: ", chunkIndex, " map chunks, ",
             terrain.textures.size(), " textures, ",
             terrain.doodadNames.size(), " doodads, ",
             terrain.wmoNames.size(), " WMOs");

    return terrain;
}
|
||||
|
||||
bool ADTLoader::readChunkHeader(const uint8_t* data, size_t offset, size_t dataSize, ChunkHeader& header) {
|
||||
if (offset + 8 > dataSize) {
|
||||
return false;
|
||||
}
|
||||
|
||||
header.magic = readUInt32(data, offset);
|
||||
header.size = readUInt32(data, offset + 4);
|
||||
|
||||
// Validate chunk size
|
||||
if (offset + 8 + header.size > dataSize) {
|
||||
LOG_WARNING("Chunk extends beyond file: magic=0x", std::hex, header.magic,
|
||||
", size=", std::dec, header.size);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Reads a uint32 from a possibly unaligned buffer position via memcpy
// (avoids undefined behavior from an unaligned pointer cast).
uint32_t ADTLoader::readUInt32(const uint8_t* data, size_t offset) {
    uint32_t result = 0;
    std::memcpy(&result, data + offset, sizeof(result));
    return result;
}
|
||||
|
||||
// Reads a float from a possibly unaligned buffer position via memcpy.
float ADTLoader::readFloat(const uint8_t* data, size_t offset) {
    float result = 0.0f;
    std::memcpy(&result, data + offset, sizeof(result));
    return result;
}
|
||||
|
||||
// Reads a uint16 from a possibly unaligned buffer position via memcpy.
uint16_t ADTLoader::readUInt16(const uint8_t* data, size_t offset) {
    uint16_t result = 0;
    std::memcpy(&result, data + offset, sizeof(result));
    return result;
}
|
||||
|
||||
void ADTLoader::parseMVER(const uint8_t* data, size_t size, ADTTerrain& terrain) {
|
||||
if (size < 4) {
|
||||
LOG_WARNING("MVER chunk too small");
|
||||
return;
|
||||
}
|
||||
|
||||
terrain.version = readUInt32(data, 0);
|
||||
LOG_DEBUG("ADT version: ", terrain.version);
|
||||
}
|
||||
|
||||
// MTEX: a sequence of null-terminated texture filenames.
//
// The scan is explicitly bounded by `size` instead of using std::strlen,
// so a malformed chunk whose final string lacks a null terminator cannot
// read past the end of the buffer (the previous implementation could).
// An empty string terminates the list, matching prior behavior.
void ADTLoader::parseMTEX(const uint8_t* data, size_t size, ADTTerrain& terrain) {
    size_t offset = 0;

    while (offset < size) {
        // Find the end of the current name without leaving the buffer.
        size_t end = offset;
        while (end < size && data[end] != 0) {
            end++;
        }

        size_t nameLen = end - offset;
        if (nameLen == 0) {
            break;
        }

        terrain.textures.emplace_back(reinterpret_cast<const char*>(data + offset), nameLen);
        offset = end + 1; // +1 for null terminator
    }

    LOG_DEBUG("Loaded ", terrain.textures.size(), " texture names");
}
|
||||
|
||||
// MMDX: a sequence of null-terminated M2 (doodad) model filenames.
//
// Uses a bounded scan instead of std::strlen so a missing terminator at
// the end of the chunk cannot cause an out-of-bounds read. An empty
// string terminates the list, matching prior behavior.
void ADTLoader::parseMMDX(const uint8_t* data, size_t size, ADTTerrain& terrain) {
    size_t offset = 0;

    while (offset < size) {
        size_t end = offset;
        while (end < size && data[end] != 0) {
            end++;
        }

        size_t nameLen = end - offset;
        if (nameLen == 0) {
            break;
        }

        terrain.doodadNames.emplace_back(reinterpret_cast<const char*>(data + offset), nameLen);
        offset = end + 1; // skip null terminator
    }

    LOG_DEBUG("Loaded ", terrain.doodadNames.size(), " doodad names");
}
|
||||
|
||||
void ADTLoader::parseMWMO(const uint8_t* data, size_t size, ADTTerrain& terrain) {
|
||||
// MWMO contains null-terminated WMO filenames
|
||||
size_t offset = 0;
|
||||
|
||||
while (offset < size) {
|
||||
const char* wmoName = reinterpret_cast<const char*>(data + offset);
|
||||
size_t nameLen = std::strlen(wmoName);
|
||||
|
||||
if (nameLen == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
terrain.wmoNames.push_back(std::string(wmoName, nameLen));
|
||||
offset += nameLen + 1;
|
||||
}
|
||||
|
||||
LOG_DEBUG("Loaded ", terrain.wmoNames.size(), " WMO names");
|
||||
for (size_t i = 0; i < terrain.wmoNames.size(); i++) {
|
||||
LOG_INFO(" WMO[", i, "]: ", terrain.wmoNames[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void ADTLoader::parseMDDF(const uint8_t* data, size_t size, ADTTerrain& terrain) {
|
||||
// MDDF contains doodad placements (36 bytes each)
|
||||
const size_t entrySize = 36;
|
||||
size_t count = size / entrySize;
|
||||
|
||||
for (size_t i = 0; i < count; i++) {
|
||||
size_t offset = i * entrySize;
|
||||
|
||||
ADTTerrain::DoodadPlacement placement;
|
||||
placement.nameId = readUInt32(data, offset);
|
||||
placement.uniqueId = readUInt32(data, offset + 4);
|
||||
placement.position[0] = readFloat(data, offset + 8);
|
||||
placement.position[1] = readFloat(data, offset + 12);
|
||||
placement.position[2] = readFloat(data, offset + 16);
|
||||
placement.rotation[0] = readFloat(data, offset + 20);
|
||||
placement.rotation[1] = readFloat(data, offset + 24);
|
||||
placement.rotation[2] = readFloat(data, offset + 28);
|
||||
placement.scale = readUInt16(data, offset + 32);
|
||||
placement.flags = readUInt16(data, offset + 34);
|
||||
|
||||
terrain.doodadPlacements.push_back(placement);
|
||||
}
|
||||
|
||||
LOG_INFO("Loaded ", terrain.doodadPlacements.size(), " doodad placements");
|
||||
}
|
||||
|
||||
// MODF: WMO placements, one 64-byte record per placed WMO. Each record
// holds which WMO (index into the MWMO name list), its transform, and
// its axis-aligned bounding box.
void ADTLoader::parseMODF(const uint8_t* data, size_t size, ADTTerrain& terrain) {
    // MODF contains WMO placements (64 bytes each)
    const size_t entrySize = 64;
    size_t count = size / entrySize;

    for (size_t i = 0; i < count; i++) {
        size_t offset = i * entrySize;

        ADTTerrain::WMOPlacement placement;
        placement.nameId = readUInt32(data, offset);       // index into wmoNames
        placement.uniqueId = readUInt32(data, offset + 4);
        placement.position[0] = readFloat(data, offset + 8);
        placement.position[1] = readFloat(data, offset + 12);
        placement.position[2] = readFloat(data, offset + 16);
        placement.rotation[0] = readFloat(data, offset + 20);
        placement.rotation[1] = readFloat(data, offset + 24);
        placement.rotation[2] = readFloat(data, offset + 28);
        // Axis-aligned bounding box of the placed WMO.
        placement.extentLower[0] = readFloat(data, offset + 32);
        placement.extentLower[1] = readFloat(data, offset + 36);
        placement.extentLower[2] = readFloat(data, offset + 40);
        placement.extentUpper[0] = readFloat(data, offset + 44);
        placement.extentUpper[1] = readFloat(data, offset + 48);
        placement.extentUpper[2] = readFloat(data, offset + 52);
        placement.flags = readUInt16(data, offset + 56);
        placement.doodadSet = readUInt16(data, offset + 58);
        // Bytes 60-63 of each record are not read here.

        terrain.wmoPlacements.push_back(placement);
    }

    LOG_INFO("Loaded ", terrain.wmoPlacements.size(), " WMO placements");
}
|
||||
|
||||
void ADTLoader::parseMCNK(const uint8_t* data, size_t size, int chunkIndex, ADTTerrain& terrain) {
|
||||
if (chunkIndex < 0 || chunkIndex >= 256) {
|
||||
LOG_WARNING("Invalid chunk index: ", chunkIndex);
|
||||
return;
|
||||
}
|
||||
|
||||
MapChunk& chunk = terrain.chunks[chunkIndex];
|
||||
|
||||
// Read MCNK header (128 bytes)
|
||||
if (size < 128) {
|
||||
LOG_WARNING("MCNK chunk too small");
|
||||
return;
|
||||
}
|
||||
|
||||
chunk.flags = readUInt32(data, 0);
|
||||
chunk.indexX = readUInt32(data, 4);
|
||||
chunk.indexY = readUInt32(data, 8);
|
||||
|
||||
// Read holes mask (at offset 0x3C = 60 in MCNK header)
|
||||
// Each bit represents a 2x2 block of the 8x8 quad grid
|
||||
chunk.holes = readUInt16(data, 60);
|
||||
|
||||
// Read layer count and offsets from MCNK header
|
||||
uint32_t nLayers = readUInt32(data, 12);
|
||||
uint32_t ofsHeight = readUInt32(data, 20); // MCVT offset
|
||||
uint32_t ofsNormal = readUInt32(data, 24); // MCNR offset
|
||||
uint32_t ofsLayer = readUInt32(data, 28); // MCLY offset
|
||||
uint32_t ofsAlpha = readUInt32(data, 36); // MCAL offset
|
||||
uint32_t sizeAlpha = readUInt32(data, 40);
|
||||
|
||||
// Debug first chunk only
|
||||
if (chunkIndex == 0) {
|
||||
LOG_INFO("MCNK[0] offsets: nLayers=", nLayers,
|
||||
" height=", ofsHeight, " normal=", ofsNormal,
|
||||
" layer=", ofsLayer, " alpha=", ofsAlpha,
|
||||
" sizeAlpha=", sizeAlpha, " size=", size,
|
||||
" holes=0x", std::hex, chunk.holes, std::dec);
|
||||
}
|
||||
|
||||
// Position (stored at offset 0x68 = 104 in MCNK header)
|
||||
chunk.position[0] = readFloat(data, 104); // X
|
||||
chunk.position[1] = readFloat(data, 108); // Y
|
||||
chunk.position[2] = readFloat(data, 112); // Z
|
||||
|
||||
// Parse sub-chunks using offsets from MCNK header
|
||||
// WoW ADT sub-chunks may have their own 8-byte headers (magic+size)
|
||||
// Check by inspecting the first 4 bytes at the offset
|
||||
|
||||
// Height map (MCVT) - 145 floats = 580 bytes
|
||||
if (ofsHeight > 0 && ofsHeight + 580 <= size) {
|
||||
// Check if this points to a sub-chunk header (magic "MCVT" = 0x4D435654)
|
||||
uint32_t possibleMagic = readUInt32(data, ofsHeight);
|
||||
uint32_t headerSkip = 0;
|
||||
if (possibleMagic == MCVT) {
|
||||
headerSkip = 8; // Skip magic + size
|
||||
if (chunkIndex == 0) {
|
||||
LOG_INFO("MCNK sub-chunks have headers (MCVT magic found at offset ", ofsHeight, ")");
|
||||
}
|
||||
}
|
||||
parseMCVT(data + ofsHeight + headerSkip, 580, chunk);
|
||||
}
|
||||
|
||||
// Normals (MCNR) - 145 normals (3 bytes each) + 13 padding = 448 bytes
|
||||
if (ofsNormal > 0 && ofsNormal + 448 <= size) {
|
||||
uint32_t possibleMagic = readUInt32(data, ofsNormal);
|
||||
uint32_t skip = (possibleMagic == MCNR) ? 8 : 0;
|
||||
parseMCNR(data + ofsNormal + skip, 448, chunk);
|
||||
}
|
||||
|
||||
// Texture layers (MCLY) - 16 bytes per layer
|
||||
if (ofsLayer > 0 && nLayers > 0) {
|
||||
size_t layerSize = nLayers * 16;
|
||||
uint32_t possibleMagic = readUInt32(data, ofsLayer);
|
||||
uint32_t skip = (possibleMagic == MCLY) ? 8 : 0;
|
||||
if (ofsLayer + skip + layerSize <= size) {
|
||||
parseMCLY(data + ofsLayer + skip, layerSize, chunk);
|
||||
}
|
||||
}
|
||||
|
||||
// Alpha maps (MCAL) - variable size from header
|
||||
if (ofsAlpha > 0 && sizeAlpha > 0 && ofsAlpha + sizeAlpha <= size) {
|
||||
uint32_t possibleMagic = readUInt32(data, ofsAlpha);
|
||||
uint32_t skip = (possibleMagic == MCAL) ? 8 : 0;
|
||||
parseMCAL(data + ofsAlpha + skip, sizeAlpha - skip, chunk);
|
||||
}
|
||||
}
|
||||
|
||||
void ADTLoader::parseMCVT(const uint8_t* data, size_t size, MapChunk& chunk) {
|
||||
// MCVT contains 145 height values (floats)
|
||||
if (size < 145 * sizeof(float)) {
|
||||
LOG_WARNING("MCVT chunk too small: ", size, " bytes");
|
||||
return;
|
||||
}
|
||||
|
||||
float minHeight = 999999.0f;
|
||||
float maxHeight = -999999.0f;
|
||||
|
||||
for (int i = 0; i < 145; i++) {
|
||||
float height = readFloat(data, i * sizeof(float));
|
||||
chunk.heightMap.heights[i] = height;
|
||||
|
||||
if (height < minHeight) minHeight = height;
|
||||
if (height > maxHeight) maxHeight = height;
|
||||
}
|
||||
|
||||
// Log height range for first chunk only
|
||||
static bool logged = false;
|
||||
if (!logged) {
|
||||
LOG_DEBUG("MCVT height range: [", minHeight, ", ", maxHeight, "]");
|
||||
logged = true;
|
||||
}
|
||||
}
|
||||
|
||||
void ADTLoader::parseMCNR(const uint8_t* data, size_t size, MapChunk& chunk) {
|
||||
// MCNR contains 145 normals (3 bytes each, signed)
|
||||
if (size < 145 * 3) {
|
||||
LOG_WARNING("MCNR chunk too small: ", size, " bytes");
|
||||
return;
|
||||
}
|
||||
|
||||
for (int i = 0; i < 145 * 3; i++) {
|
||||
chunk.normals[i] = static_cast<int8_t>(data[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// MCLY: texture layer definitions for one map chunk, 16 bytes per layer.
// Layers are appended to chunk.layers in file order; textureId indexes
// the MTEX list and offsetMCAL addresses this chunk's MCAL alpha data.
void ADTLoader::parseMCLY(const uint8_t* data, size_t size, MapChunk& chunk) {
    // MCLY contains texture layer definitions (16 bytes each)
    size_t layerCount = size / 16;

    if (layerCount > 4) {
        // Clamp rather than fail: only the first 4 layers are kept.
        LOG_WARNING("More than 4 texture layers: ", layerCount);
        layerCount = 4;
    }

    // Function-local static: limits this INFO logging to the first 10
    // layers seen across ALL chunks parsed by the process.
    static int layerLogCount = 0;
    for (size_t i = 0; i < layerCount; i++) {
        TextureLayer layer;

        layer.textureId = readUInt32(data, i * 16 + 0);
        layer.flags = readUInt32(data, i * 16 + 4);
        layer.offsetMCAL = readUInt32(data, i * 16 + 8);
        layer.effectId = readUInt32(data, i * 16 + 12);

        if (layerLogCount < 10) {
            LOG_INFO(" MCLY[", i, "]: texId=", layer.textureId,
                     " flags=0x", std::hex, layer.flags, std::dec,
                     " alphaOfs=", layer.offsetMCAL,
                     " useAlpha=", layer.useAlpha(),
                     " compressed=", layer.compressedAlpha());
            layerLogCount++;
        }

        chunk.layers.push_back(layer);
    }
}
|
||||
|
||||
// MCAL: raw (possibly compressed) alpha-map bytes for the chunk's texture
// layers. Stored verbatim; decoding happens per-layer during mesh
// generation.
void ADTLoader::parseMCAL(const uint8_t* data, size_t size, MapChunk& chunk) {
    chunk.alphaMap.assign(data, data + size);
}
|
||||
|
||||
// MH2O: water/liquid data for all 256 map chunks of the tile.
// Layout: 256 fixed headers, then instance/bitmap/vertex payloads
// addressed by offsets relative to the start of this chunk (`data`).
void ADTLoader::parseMH2O(const uint8_t* data, size_t size, ADTTerrain& terrain) {
    // Each SMLiquidChunk header is 12 bytes (WotLK 3.3.5a):
    // - uint32_t offsetInstances (offset from MH2O chunk start)
    // - uint32_t layerCount
    // - uint32_t offsetAttributes (offset from MH2O chunk start)

    const size_t headerSize = 12; // SMLiquidChunk size for WotLK
    const size_t totalHeaderSize = 256 * headerSize;

    if (size < totalHeaderSize) {
        LOG_WARNING("MH2O chunk too small for headers: ", size, " bytes");
        return;
    }

    int totalLayers = 0;

    for (int chunkIdx = 0; chunkIdx < 256; chunkIdx++) {
        size_t headerOffset = chunkIdx * headerSize;

        uint32_t offsetInstances = readUInt32(data, headerOffset);
        uint32_t layerCount = readUInt32(data, headerOffset + 4);
        // uint32_t offsetAttributes = readUInt32(data, headerOffset + 8); // Not used

        if (layerCount == 0 || offsetInstances == 0) {
            continue; // No water in this chunk
        }

        // Sanity checks on header values before dereferencing the offsets.
        if (offsetInstances >= size) {
            continue;
        }
        if (layerCount > 16) {
            // Sanity check - max 16 layers per chunk is reasonable
            LOG_WARNING("MH2O: Invalid layer count ", layerCount, " for chunk ", chunkIdx);
            continue;
        }

        // Parse each liquid layer (SMLiquidInstance - 24 bytes)
        for (uint32_t layerIdx = 0; layerIdx < layerCount; layerIdx++) {
            size_t instanceOffset = offsetInstances + layerIdx * 24;

            if (instanceOffset + 24 > size) {
                break;
            }

            ADTTerrain::WaterLayer layer;
            layer.liquidType = readUInt16(data, instanceOffset);
            // NOTE(review): read as the vertex-format selector below —
            // confirm against the 3.3.5a SMLiquidInstance layout
            // (liquidObject vs. LVF field meaning).
            uint16_t liquidObject = readUInt16(data, instanceOffset + 2); // LVF format flags
            layer.minHeight = readFloat(data, instanceOffset + 4);
            layer.maxHeight = readFloat(data, instanceOffset + 8);
            // Tile-grid rectangle covered by this layer within the 8x8 grid.
            layer.x = data[instanceOffset + 12];
            layer.y = data[instanceOffset + 13];
            layer.width = data[instanceOffset + 14];
            layer.height = data[instanceOffset + 15];
            uint32_t offsetExistsBitmap = readUInt32(data, instanceOffset + 16);
            uint32_t offsetVertexData = readUInt32(data, instanceOffset + 20);

            // Skip invalid layers
            if (layer.width == 0 || layer.height == 0) {
                continue;
            }

            // Clamp dimensions to valid range
            if (layer.width > 8) layer.width = 8;
            if (layer.height > 8) layer.height = 8;
            if (layer.x + layer.width > 8) layer.width = 8 - layer.x;
            if (layer.y + layer.height > 8) layer.height = 8 - layer.y;

            // Read exists bitmap (which tiles have water)
            // The bitmap is (width * height) bits, packed into bytes
            size_t numTiles = layer.width * layer.height;
            size_t bitmapBytes = (numTiles + 7) / 8;

            // Note: offsets in SMLiquidInstance are relative to MH2O chunk start
            if (offsetExistsBitmap > 0) {
                size_t bitmapOffset = offsetExistsBitmap;
                if (bitmapOffset + bitmapBytes <= size) {
                    layer.mask.resize(bitmapBytes);
                    std::memcpy(layer.mask.data(), data + bitmapOffset, bitmapBytes);
                }
                // If the bitmap is out of bounds the mask stays empty.
            } else {
                // No bitmap means all tiles have water
                layer.mask.resize(bitmapBytes, 0xFF);
            }

            // Read vertex heights
            // Number of vertices is (width+1) * (height+1)
            size_t numVertices = (layer.width + 1) * (layer.height + 1);

            // Check liquid object flags (LVF) to determine vertex format.
            // NOTE(review): format 2 is treated as "no per-vertex heights";
            // all other formats are assumed to begin with a float height
            // array. Confirm against the LVF vertex-format definitions.
            bool hasHeightData = (liquidObject != 2); // LVF_height_depth or LVF_height_texcoord

            if (hasHeightData && offsetVertexData > 0) {
                size_t vertexOffset = offsetVertexData;
                size_t vertexDataSize = numVertices * sizeof(float);

                if (vertexOffset + vertexDataSize <= size) {
                    layer.heights.resize(numVertices);
                    for (size_t i = 0; i < numVertices; i++) {
                        layer.heights[i] = readFloat(data, vertexOffset + i * sizeof(float));
                    }
                } else {
                    // Offset out of bounds - use flat water
                    layer.heights.resize(numVertices, layer.minHeight);
                }
            } else {
                // No height data - use flat surface at minHeight
                layer.heights.resize(numVertices, layer.minHeight);
            }

            // Default flags
            layer.flags = 0;

            terrain.waterData[chunkIdx].layers.push_back(layer);
            totalLayers++;
        }
    }

    LOG_INFO("Loaded MH2O water data: ", totalLayers, " liquid layers across ", size, " bytes");
}
|
||||
|
||||
} // namespace pipeline
|
||||
} // namespace wowee
|
||||
154
src/pipeline/asset_manager.cpp
Normal file
154
src/pipeline/asset_manager.cpp
Normal file
|
|
@ -0,0 +1,154 @@
|
|||
#include "pipeline/asset_manager.hpp"
|
||||
#include "core/logger.hpp"
|
||||
#include <algorithm>
|
||||
|
||||
namespace wowee {
|
||||
namespace pipeline {
|
||||
|
||||
// Construction does no work; resources are acquired in initialize().
AssetManager::AssetManager() = default;

// Destructor tears everything down via shutdown(), which is safe to call
// even if initialize() never ran (it returns early when uninitialized).
AssetManager::~AssetManager() {
    shutdown();
}
|
||||
|
||||
bool AssetManager::initialize(const std::string& dataPath_) {
|
||||
if (initialized) {
|
||||
LOG_WARNING("AssetManager already initialized");
|
||||
return true;
|
||||
}
|
||||
|
||||
dataPath = dataPath_;
|
||||
LOG_INFO("Initializing asset manager with data path: ", dataPath);
|
||||
|
||||
// Initialize MPQ manager
|
||||
if (!mpqManager.initialize(dataPath)) {
|
||||
LOG_ERROR("Failed to initialize MPQ manager");
|
||||
return false;
|
||||
}
|
||||
|
||||
initialized = true;
|
||||
LOG_INFO("Asset manager initialized successfully");
|
||||
return true;
|
||||
}
|
||||
|
||||
// Releases caches and the MPQ manager. Idempotent: does nothing unless a
// successful initialize() preceded it.
void AssetManager::shutdown() {
    if (!initialized) {
        return;
    }

    LOG_INFO("Shutting down asset manager");

    clearCache();
    mpqManager.shutdown();
    initialized = false;
}
|
||||
|
||||
// Loads a BLP texture from the MPQ archives by (slash-agnostic) path.
// Returns a default-constructed (invalid) BLPImage on any failure;
// results are not cached here.
BLPImage AssetManager::loadTexture(const std::string& path) {
    if (!initialized) {
        LOG_ERROR("AssetManager not initialized");
        return BLPImage();
    }

    const std::string normalizedPath = normalizePath(path);
    LOG_DEBUG("Loading texture: ", normalizedPath);

    // Pull the raw BLP bytes out of the archives.
    const std::vector<uint8_t> blpData = mpqManager.readFile(normalizedPath);
    if (blpData.empty()) {
        LOG_WARNING("Texture not found: ", normalizedPath);
        return BLPImage();
    }

    // Decode; an invalid result means the bytes were not a usable BLP.
    BLPImage image = BLPLoader::load(blpData);
    if (!image.isValid()) {
        LOG_ERROR("Failed to load texture: ", normalizedPath);
        return BLPImage();
    }

    LOG_INFO("Loaded texture: ", normalizedPath, " (", image.width, "x", image.height, ")");
    return image;
}
|
||||
|
||||
// Loads (and caches) a client database file by bare name, e.g. "Map.dbc".
// Subsequent calls with the same name return the cached instance.
// Returns nullptr on any failure; failures are not cached, so a later
// call will retry the load.
std::shared_ptr<DBCFile> AssetManager::loadDBC(const std::string& name) {
    if (!initialized) {
        LOG_ERROR("AssetManager not initialized");
        return nullptr;
    }

    // Check cache first
    auto it = dbcCache.find(name);
    if (it != dbcCache.end()) {
        LOG_DEBUG("DBC already loaded (cached): ", name);
        return it->second;
    }

    LOG_DEBUG("Loading DBC: ", name);

    // All DBCs live under the DBFilesClient directory inside the archives.
    std::string dbcPath = "DBFilesClient\\" + name;

    // Read DBC file from MPQ
    std::vector<uint8_t> dbcData = mpqManager.readFile(dbcPath);
    if (dbcData.empty()) {
        LOG_WARNING("DBC not found: ", dbcPath);
        return nullptr;
    }

    // Parse the raw bytes into a DBCFile.
    auto dbc = std::make_shared<DBCFile>();
    if (!dbc->load(dbcData)) {
        LOG_ERROR("Failed to load DBC: ", dbcPath);
        return nullptr;
    }

    // Cache the DBC
    dbcCache[name] = dbc;

    LOG_INFO("Loaded DBC: ", name, " (", dbc->getRecordCount(), " records)");
    return dbc;
}
|
||||
|
||||
std::shared_ptr<DBCFile> AssetManager::getDBC(const std::string& name) const {
|
||||
auto it = dbcCache.find(name);
|
||||
if (it != dbcCache.end()) {
|
||||
return it->second;
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool AssetManager::fileExists(const std::string& path) const {
|
||||
if (!initialized) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return mpqManager.fileExists(normalizePath(path));
|
||||
}
|
||||
|
||||
// Reads a raw file out of the MPQ archives. Returns an empty vector when
// uninitialized or when the file is missing/unreadable.
std::vector<uint8_t> AssetManager::readFile(const std::string& path) const {
    if (!initialized) {
        return std::vector<uint8_t>();
    }

    // Serializes archive access through this entry point.
    // NOTE(review): loadTexture()/loadDBC() call mpqManager.readFile()
    // WITHOUT taking readMutex — confirm whether MPQ access is internally
    // thread-safe or whether those paths need the same lock.
    std::lock_guard<std::mutex> lock(readMutex);
    return mpqManager.readFile(normalizePath(path));
}
|
||||
|
||||
// Drops all cached DBC handles. Existing shared_ptr holders keep their
// DBC data alive; only the cache's references are released.
void AssetManager::clearCache() {
    dbcCache.clear();
    LOG_INFO("Cleared asset cache");
}
|
||||
|
||||
// WoW archive paths use backslashes; rewrite any forward slashes so
// callers may pass either separator.
std::string AssetManager::normalizePath(const std::string& path) const {
    std::string normalized = path;
    for (char& ch : normalized) {
        if (ch == '/') {
            ch = '\\';
        }
    }
    return normalized;
}
|
||||
|
||||
} // namespace pipeline
|
||||
} // namespace wowee
|
||||
437
src/pipeline/blp_loader.cpp
Normal file
437
src/pipeline/blp_loader.cpp
Normal file
|
|
@ -0,0 +1,437 @@
|
|||
#include "pipeline/blp_loader.hpp"
|
||||
#include "core/logger.hpp"
|
||||
#include <cstring>
|
||||
#include <algorithm>
|
||||
|
||||
namespace wowee {
|
||||
namespace pipeline {
|
||||
|
||||
// Decodes a BLP texture, dispatching on the 4-byte magic at the start of
// the buffer. Returns a default-constructed (invalid) BLPImage for
// unknown magics, unsupported versions, or undersized input.
BLPImage BLPLoader::load(const std::vector<uint8_t>& blpData) {
    constexpr size_t kMinSize = 8; // magic + first header field
    if (blpData.size() < kMinSize) {
        LOG_ERROR("BLP data too small");
        return BLPImage();
    }

    const uint8_t* data = blpData.data();
    const char* magic = reinterpret_cast<const char*>(data);

    if (std::memcmp(magic, "BLP1", 4) == 0) {
        return loadBLP1(data, blpData.size());
    }
    if (std::memcmp(magic, "BLP2", 4) == 0) {
        return loadBLP2(data, blpData.size());
    }
    if (std::memcmp(magic, "BLP0", 4) == 0) {
        LOG_WARNING("BLP0 format not fully supported");
        return BLPImage();
    }

    LOG_ERROR("Invalid BLP magic: ", std::string(magic, 4));
    return BLPImage();
}
|
||||
|
||||
// Decodes a BLP1 texture (palette-only in WoW assets).
//
// Hardening over the previous version:
//  - the buffer is checked against sizeof(BLP1Header) BEFORE the header is
//    dereferenced (the caller only guarantees 8 bytes);
//  - the mip-0 bounds check is overflow-safe (offset + mipSize could wrap);
//  - mip 0 must actually contain enough bytes for the palette decode, since
//    decompressPalette takes no size argument;
//  - degenerate/absurd dimensions are rejected before sizing buffers.
// Returns an invalid BLPImage on any failure.
BLPImage BLPLoader::loadBLP1(const uint8_t* data, size_t size) {
    if (size < sizeof(BLP1Header)) {
        LOG_ERROR("BLP1 data too small for header (", size, " bytes)");
        return BLPImage();
    }

    // BLP1 header has all uint32 fields (different layout from BLP2).
    const BLP1Header* header = reinterpret_cast<const BLP1Header*>(data);

    BLPImage image;
    image.format = BLPFormat::BLP1;
    image.width = header->width;
    image.height = header->height;
    image.channels = 4;
    image.mipLevels = header->hasMips ? 16 : 1;

    if (header->width == 0 || header->height == 0 ||
        header->width > 65536 || header->height > 65536) {
        LOG_ERROR("BLP1 invalid dimensions: ", header->width, "x", header->height);
        return BLPImage();
    }

    // BLP1 compression: 0=JPEG (not supported), 1=palette/indexed.
    // BLP1 does NOT support DXT — only palette with optional alpha.
    if (header->compression == 1) {
        image.compression = BLPCompression::PALETTE;
    } else if (header->compression == 0) {
        LOG_WARNING("BLP1 JPEG compression not supported");
        return BLPImage();
    } else {
        LOG_WARNING("BLP1 unknown compression: ", header->compression);
        return BLPImage();
    }

    LOG_DEBUG("Loading BLP1: ", image.width, "x", image.height, " ",
              getCompressionName(image.compression), " alpha=", header->alphaBits);

    // Mip 0 is the full-resolution image.
    uint32_t offset = header->mipOffsets[0];
    uint32_t mipSize = header->mipSizes[0];

    // Overflow-safe form of "offset + mipSize > size".
    if (offset > size || mipSize > size - offset) {
        LOG_ERROR("BLP1 mipmap data out of bounds (offset=", offset, " size=", mipSize, " fileSize=", size, ")");
        return BLPImage();
    }

    const uint8_t* mipData = data + offset;

    const size_t pixelCount = static_cast<size_t>(image.width) * image.height;

    // One palette index per pixel plus packed per-pixel alpha bits.
    // NOTE(review): assumes alphaBits <= 8 (the values seen in WoW data);
    // confirm against the BLP1 header spec.
    const size_t requiredBytes = pixelCount + (pixelCount * header->alphaBits + 7) / 8;
    if (mipSize < requiredBytes) {
        LOG_ERROR("BLP1 mip 0 truncated: have ", mipSize, " bytes, need ", requiredBytes);
        return BLPImage();
    }

    image.data.resize(pixelCount * 4); // RGBA8 output

    decompressPalette(mipData, image.data.data(), header->palette,
                      image.width, image.height, static_cast<uint8_t>(header->alphaBits));

    return image;
}
|
||||
|
||||
// Decodes a BLP2 texture (the WotLK-era format).
//
// Hardening over the previous version:
//  - the buffer is checked against sizeof(BLP2Header) BEFORE the header is
//    dereferenced (the caller only guarantees 8 bytes);
//  - the mip-0 bounds check is overflow-safe (offset + mipSize could wrap);
//  - mip 0 must contain at least as many bytes as the chosen encoding
//    needs, since the decompressors take no size argument;
//  - degenerate/absurd dimensions are rejected before sizing buffers.
// Returns an invalid BLPImage on any failure.
BLPImage BLPLoader::loadBLP2(const uint8_t* data, size_t size) {
    if (size < sizeof(BLP2Header)) {
        LOG_ERROR("BLP2 data too small for header (", size, " bytes)");
        return BLPImage();
    }

    // BLP2 header has uint8 fields for compression/alpha/encoding.
    const BLP2Header* header = reinterpret_cast<const BLP2Header*>(data);

    BLPImage image;
    image.format = BLPFormat::BLP2;
    image.width = header->width;
    image.height = header->height;
    image.channels = 4;
    image.mipLevels = header->hasMips ? 16 : 1;

    if (header->width == 0 || header->height == 0 ||
        header->width > 65536 || header->height > 65536) {
        LOG_ERROR("BLP2 invalid dimensions: ", header->width, "x", header->height);
        return BLPImage();
    }

    // BLP2 compression types:
    //   1 = palette/uncompressed
    //   2 = DXTC (DXT1/DXT3/DXT5 based on alphaDepth + alphaEncoding)
    //   3 = plain A8R8G8B8
    if (header->compression == 1) {
        image.compression = BLPCompression::PALETTE;
    } else if (header->compression == 2) {
        // DXTC selection:
        //   alphaDepth=0                  -> DXT1 (no alpha)
        //   alphaDepth>0, alphaEncoding=0 -> DXT1 (1-bit alpha)
        //   alphaDepth>0, alphaEncoding=1 -> DXT3 (explicit 4-bit alpha)
        //   alphaDepth>0, alphaEncoding=7 -> DXT5 (interpolated alpha)
        if (header->alphaDepth == 0 || header->alphaEncoding == 0) {
            image.compression = BLPCompression::DXT1;
        } else if (header->alphaEncoding == 1) {
            image.compression = BLPCompression::DXT3;
        } else if (header->alphaEncoding == 7) {
            image.compression = BLPCompression::DXT5;
        } else {
            image.compression = BLPCompression::DXT1;
        }
    } else if (header->compression == 3) {
        image.compression = BLPCompression::ARGB8888;
    } else {
        image.compression = BLPCompression::ARGB8888;
    }

    LOG_DEBUG("Loading BLP2: ", image.width, "x", image.height, " ",
              getCompressionName(image.compression),
              " (comp=", (int)header->compression, " alphaDepth=", (int)header->alphaDepth,
              " alphaEnc=", (int)header->alphaEncoding, " mipOfs=", header->mipOffsets[0],
              " mipSize=", header->mipSizes[0], ")");

    // Mip 0 is the full-resolution image.
    uint32_t offset = header->mipOffsets[0];
    uint32_t mipSize = header->mipSizes[0];

    // Overflow-safe form of "offset + mipSize > size".
    if (offset > size || mipSize > size - offset) {
        LOG_ERROR("BLP2 mipmap data out of bounds");
        return BLPImage();
    }

    const uint8_t* mipData = data + offset;

    const size_t pixelCount = static_cast<size_t>(image.width) * image.height;

    // Compute the minimum mip-0 payload for the chosen encoding so the
    // decompressors cannot over-read a truncated file.
    const size_t blocksX = (static_cast<size_t>(image.width) + 3) / 4;
    const size_t blocksY = (static_cast<size_t>(image.height) + 3) / 4;
    size_t requiredBytes = 0;
    switch (image.compression) {
        case BLPCompression::DXT1:
            requiredBytes = blocksX * blocksY * 8;  // 8 bytes per 4x4 block
            break;
        case BLPCompression::DXT3:
        case BLPCompression::DXT5:
            requiredBytes = blocksX * blocksY * 16; // 16 bytes per 4x4 block
            break;
        case BLPCompression::PALETTE:
            // One index byte per pixel plus packed per-pixel alpha bits.
            requiredBytes = pixelCount + (pixelCount * header->alphaDepth + 7) / 8;
            break;
        case BLPCompression::ARGB8888:
            requiredBytes = pixelCount * 4;
            break;
        default:
            break;
    }
    if (mipSize < requiredBytes) {
        LOG_ERROR("BLP2 mip 0 truncated: have ", mipSize, " bytes, need ", requiredBytes);
        return BLPImage();
    }

    image.data.resize(pixelCount * 4); // RGBA8 output

    switch (image.compression) {
        case BLPCompression::DXT1:
            decompressDXT1(mipData, image.data.data(), image.width, image.height);
            break;

        case BLPCompression::DXT3:
            decompressDXT3(mipData, image.data.data(), image.width, image.height);
            break;

        case BLPCompression::DXT5:
            decompressDXT5(mipData, image.data.data(), image.width, image.height);
            break;

        case BLPCompression::PALETTE:
            decompressPalette(mipData, image.data.data(), header->palette,
                              image.width, image.height, header->alphaDepth);
            break;

        case BLPCompression::ARGB8888:
            // Stored as BGRA byte order; swizzle to RGBA.
            for (size_t i = 0; i < pixelCount; i++) {
                image.data[i * 4 + 0] = mipData[i * 4 + 2]; // R
                image.data[i * 4 + 1] = mipData[i * 4 + 1]; // G
                image.data[i * 4 + 2] = mipData[i * 4 + 0]; // B
                image.data[i * 4 + 3] = mipData[i * 4 + 3]; // A
            }
            break;

        default:
            LOG_ERROR("Unsupported BLP2 compression type");
            return BLPImage();
    }

    // DXT1 with alphaDepth=0 has no meaningful alpha channel, but the DXT1
    // color-key mode can produce alpha=0 pixels. Force all alpha to 255.
    if (header->alphaDepth == 0) {
        for (size_t i = 0; i < pixelCount; i++) {
            image.data[i * 4 + 3] = 255;
        }
    }

    return image;
}
|
||||
|
||||
void BLPLoader::decompressDXT1(const uint8_t* src, uint8_t* dst, int width, int height) {
    // DXT1/BC1: each 8-byte block encodes a 4x4 pixel tile as two RGB565
    // endpoints plus 2-bit per-pixel selectors into a derived 4-entry palette.
    const int blocksX = (width + 3) / 4;
    const int blocksY = (height + 3) / 4;

    for (int by = 0; by < blocksY; by++) {
        for (int bx = 0; bx < blocksX; bx++) {
            const uint8_t* tile = src + (by * blocksX + bx) * 8;

            // Color endpoints, little-endian RGB565.
            const uint16_t c0 = static_cast<uint16_t>(tile[0] | (tile[1] << 8));
            const uint16_t c1 = static_cast<uint16_t>(tile[2] | (tile[3] << 8));

            // Expand endpoints to RGBA8 and precompute the full 4-entry palette.
            uint8_t palette[4][4];
            const auto expand565 = [](uint16_t c, uint8_t* out) {
                out[0] = ((c >> 11) & 0x1F) * 255 / 31;
                out[1] = ((c >> 5) & 0x3F) * 255 / 63;
                out[2] = (c & 0x1F) * 255 / 31;
                out[3] = 255;
            };
            expand565(c0, palette[0]);
            expand565(c1, palette[1]);

            if (c0 > c1) {
                // Four-color opaque mode: 1/3 and 2/3 interpolants.
                for (int ch = 0; ch < 3; ch++) {
                    palette[2][ch] = (2 * palette[0][ch] + palette[1][ch]) / 3;
                    palette[3][ch] = (palette[0][ch] + 2 * palette[1][ch]) / 3;
                }
                palette[2][3] = 255;
                palette[3][3] = 255;
            } else {
                // Three-color mode: midpoint plus a transparent-black entry.
                for (int ch = 0; ch < 3; ch++) {
                    palette[2][ch] = (palette[0][ch] + palette[1][ch]) / 2;
                    palette[3][ch] = 0;
                }
                palette[2][3] = 255;
                palette[3][3] = 0;  // selector 3 = transparent (color-key)
            }

            // 32 bits of selectors: 2 bits per pixel, row-major within the tile.
            const uint32_t selectors = tile[4] | (tile[5] << 8) |
                                       (tile[6] << 16) | (tile[7] << 24);

            for (int p = 0; p < 16; p++) {
                const int x = bx * 4 + (p & 3);
                const int y = by * 4 + (p >> 2);
                if (x >= width || y >= height) continue;  // partial edge tile

                const int sel = (selectors >> (p * 2)) & 0x3;
                uint8_t* out = dst + (y * width + x) * 4;
                out[0] = palette[sel][0];
                out[1] = palette[sel][1];
                out[2] = palette[sel][2];
                out[3] = palette[sel][3];
            }
        }
    }
}
|
||||
|
||||
void BLPLoader::decompressDXT3(const uint8_t* src, uint8_t* dst, int width, int height) {
    // DXT3/BC2: 16 bytes per 4x4 tile — 8 bytes of explicit 4-bit alpha
    // followed by a DXT1-style color block, always decoded in 4-color mode.
    const int blocksX = (width + 3) / 4;
    const int blocksY = (height + 3) / 4;

    for (int by = 0; by < blocksY; by++) {
        for (int bx = 0; bx < blocksX; bx++) {
            const uint8_t* tile = src + (by * blocksX + bx) * 16;

            // 64 bits of alpha: 4 bits per pixel, little-endian byte order.
            uint64_t alphaBits = 0;
            for (int i = 0; i < 8; i++) {
                alphaBits |= static_cast<uint64_t>(tile[i]) << (i * 8);
            }

            const uint8_t* colorBlock = tile + 8;
            const uint16_t c0 = static_cast<uint16_t>(colorBlock[0] | (colorBlock[1] << 8));
            const uint16_t c1 = static_cast<uint16_t>(colorBlock[2] | (colorBlock[3] << 8));

            // Expand both RGB565 endpoints, then the two 1/3-2/3 interpolants.
            uint8_t colors[4][3];
            const auto expand565 = [](uint16_t c, uint8_t* out) {
                out[0] = ((c >> 11) & 0x1F) * 255 / 31;
                out[1] = ((c >> 5) & 0x3F) * 255 / 63;
                out[2] = (c & 0x1F) * 255 / 31;
            };
            expand565(c0, colors[0]);
            expand565(c1, colors[1]);
            for (int ch = 0; ch < 3; ch++) {
                colors[2][ch] = (2 * colors[0][ch] + colors[1][ch]) / 3;
                colors[3][ch] = (colors[0][ch] + 2 * colors[1][ch]) / 3;
            }

            const uint32_t selectors = colorBlock[4] | (colorBlock[5] << 8) |
                                       (colorBlock[6] << 16) | (colorBlock[7] << 24);

            for (int p = 0; p < 16; p++) {
                const int x = bx * 4 + (p & 3);
                const int y = by * 4 + (p >> 2);
                if (x >= width || y >= height) continue;  // partial edge tile

                const int sel = (selectors >> (p * 2)) & 0x3;
                uint8_t* out = dst + (y * width + x) * 4;
                out[0] = colors[sel][0];
                out[1] = colors[sel][1];
                out[2] = colors[sel][2];
                // Expand the pixel's 4-bit alpha to 8 bits (0..15 -> 0..255).
                out[3] = static_cast<uint8_t>(((alphaBits >> (p * 4)) & 0xF) * 255 / 15);
            }
        }
    }
}
|
||||
|
||||
void BLPLoader::decompressDXT5(const uint8_t* src, uint8_t* dst, int width, int height) {
    // DXT5/BC3: 16 bytes per 4x4 tile — 8 bytes of interpolated alpha
    // (2 endpoints + 3-bit selectors) followed by a DXT1-style color block,
    // always decoded in 4-color mode.
    const int blocksX = (width + 3) / 4;
    const int blocksY = (height + 3) / 4;

    for (int by = 0; by < blocksY; by++) {
        for (int bx = 0; bx < blocksX; bx++) {
            const uint8_t* tile = src + (by * blocksX + bx) * 16;

            const uint8_t a0 = tile[0];
            const uint8_t a1 = tile[1];

            // Build the 8-entry alpha palette: an 8-step ramp when a0 > a1,
            // otherwise a 6-step ramp plus explicit 0 and 255 entries.
            uint8_t alphaTable[8];
            alphaTable[0] = a0;
            alphaTable[1] = a1;
            if (a0 > a1) {
                for (int k = 1; k <= 6; k++) {
                    alphaTable[k + 1] = static_cast<uint8_t>(((7 - k) * a0 + k * a1) / 7);
                }
            } else {
                for (int k = 1; k <= 4; k++) {
                    alphaTable[k + 1] = static_cast<uint8_t>(((5 - k) * a0 + k * a1) / 5);
                }
                alphaTable[6] = 0;
                alphaTable[7] = 255;
            }

            // 48 bits of alpha selectors (3 bits per pixel), little-endian.
            uint64_t alphaSel = 0;
            for (int i = 0; i < 6; i++) {
                alphaSel |= static_cast<uint64_t>(tile[2 + i]) << (i * 8);
            }

            const uint8_t* colorBlock = tile + 8;
            const uint16_t c0 = static_cast<uint16_t>(colorBlock[0] | (colorBlock[1] << 8));
            const uint16_t c1 = static_cast<uint16_t>(colorBlock[2] | (colorBlock[3] << 8));

            uint8_t colors[4][3];
            const auto expand565 = [](uint16_t c, uint8_t* out) {
                out[0] = ((c >> 11) & 0x1F) * 255 / 31;
                out[1] = ((c >> 5) & 0x3F) * 255 / 63;
                out[2] = (c & 0x1F) * 255 / 31;
            };
            expand565(c0, colors[0]);
            expand565(c1, colors[1]);
            for (int ch = 0; ch < 3; ch++) {
                colors[2][ch] = (2 * colors[0][ch] + colors[1][ch]) / 3;
                colors[3][ch] = (colors[0][ch] + 2 * colors[1][ch]) / 3;
            }

            const uint32_t selectors = colorBlock[4] | (colorBlock[5] << 8) |
                                       (colorBlock[6] << 16) | (colorBlock[7] << 24);

            for (int p = 0; p < 16; p++) {
                const int x = bx * 4 + (p & 3);
                const int y = by * 4 + (p >> 2);
                if (x >= width || y >= height) continue;  // partial edge tile

                const int sel = (selectors >> (p * 2)) & 0x3;
                uint8_t* out = dst + (y * width + x) * 4;
                out[0] = colors[sel][0];
                out[1] = colors[sel][1];
                out[2] = colors[sel][2];
                out[3] = alphaTable[(alphaSel >> (p * 3)) & 0x7];
            }
        }
    }
}
|
||||
|
||||
void BLPLoader::decompressPalette(const uint8_t* src, uint8_t* dst, const uint32_t* palette, int width, int height, uint8_t alphaDepth) {
    // Paletted BLP: one palette index per pixel, then a packed alpha stream
    // (0, 1, 4, or 8 bits per pixel) immediately after the index bytes.
    const int pixelCount = width * height;
    const uint8_t* alphaData = src + pixelCount;  // alpha stream follows indices

    for (int i = 0; i < pixelCount; i++) {
        // Palette entries are BGR in the low 3 bytes; the top byte is
        // typically 0, not alpha, so it is ignored here.
        const uint32_t bgrx = palette[src[i]];
        uint8_t* out = dst + i * 4;
        out[0] = static_cast<uint8_t>((bgrx >> 16) & 0xFF);  // R
        out[1] = static_cast<uint8_t>((bgrx >> 8) & 0xFF);   // G
        out[2] = static_cast<uint8_t>(bgrx & 0xFF);          // B

        uint8_t alpha = 255;
        switch (alphaDepth) {
            case 8:
                alpha = alphaData[i];
                break;
            case 4: {
                // Two pixels per byte; even pixel in the low nibble.
                const uint8_t packed = alphaData[i / 2];
                const uint8_t nibble = (i % 2 == 0) ? (packed & 0x0F) : (packed >> 4);
                alpha = static_cast<uint8_t>(nibble * 17);  // 0..15 -> 0..255
                break;
            }
            case 1:
                // Eight pixels per byte, LSB first.
                alpha = ((alphaData[i / 8] >> (i % 8)) & 1) ? 255 : 0;
                break;
            default:
                // No alpha stream: fully opaque.
                break;
        }
        out[3] = alpha;
    }
}
|
||||
|
||||
// Human-readable name for a BLP container version (for logging).
const char* BLPLoader::getFormatName(BLPFormat format) {
    if (format == BLPFormat::BLP0) return "BLP0";
    if (format == BLPFormat::BLP1) return "BLP1";
    if (format == BLPFormat::BLP2) return "BLP2";
    return "Unknown";
}
|
||||
|
||||
// Human-readable name for a BLP pixel-data encoding (for logging).
const char* BLPLoader::getCompressionName(BLPCompression compression) {
    if (compression == BLPCompression::NONE) return "None";
    if (compression == BLPCompression::PALETTE) return "Palette";
    if (compression == BLPCompression::DXT1) return "DXT1";
    if (compression == BLPCompression::DXT3) return "DXT3";
    if (compression == BLPCompression::DXT5) return "DXT5";
    if (compression == BLPCompression::ARGB8888) return "ARGB8888";
    return "Unknown";
}
|
||||
|
||||
} // namespace pipeline
|
||||
} // namespace wowee
|
||||
162
src/pipeline/dbc_loader.cpp
Normal file
162
src/pipeline/dbc_loader.cpp
Normal file
|
|
@ -0,0 +1,162 @@
|
|||
#include "pipeline/dbc_loader.hpp"

#include "core/logger.hpp"

#include <cstdint>
#include <cstring>
|
||||
|
||||
namespace wowee {
|
||||
namespace pipeline {
|
||||
|
||||
// Defaulted special members: DBCFile holds only self-managing containers.
DBCFile::DBCFile() = default;
DBCFile::~DBCFile() = default;
|
||||
|
||||
bool DBCFile::load(const std::vector<uint8_t>& dbcData) {
|
||||
if (dbcData.size() < sizeof(DBCHeader)) {
|
||||
LOG_ERROR("DBC data too small for header");
|
||||
return false;
|
||||
}
|
||||
|
||||
// Read header
|
||||
const DBCHeader* header = reinterpret_cast<const DBCHeader*>(dbcData.data());
|
||||
|
||||
// Verify magic
|
||||
if (std::memcmp(header->magic, "WDBC", 4) != 0) {
|
||||
LOG_ERROR("Invalid DBC magic: ", std::string(header->magic, 4));
|
||||
return false;
|
||||
}
|
||||
|
||||
recordCount = header->recordCount;
|
||||
fieldCount = header->fieldCount;
|
||||
recordSize = header->recordSize;
|
||||
stringBlockSize = header->stringBlockSize;
|
||||
|
||||
// Validate sizes
|
||||
uint32_t expectedSize = sizeof(DBCHeader) + (recordCount * recordSize) + stringBlockSize;
|
||||
if (dbcData.size() < expectedSize) {
|
||||
LOG_ERROR("DBC file truncated: expected ", expectedSize, " bytes, got ", dbcData.size());
|
||||
return false;
|
||||
}
|
||||
|
||||
// Validate record size matches field count
|
||||
if (recordSize != fieldCount * 4) {
|
||||
LOG_WARNING("DBC record size mismatch: recordSize=", recordSize,
|
||||
" but fieldCount*4=", fieldCount * 4);
|
||||
}
|
||||
|
||||
LOG_DEBUG("Loading DBC: ", recordCount, " records, ",
|
||||
fieldCount, " fields, ", recordSize, " bytes/record, ",
|
||||
stringBlockSize, " string bytes");
|
||||
|
||||
// Copy record data
|
||||
const uint8_t* recordStart = dbcData.data() + sizeof(DBCHeader);
|
||||
uint32_t totalRecordSize = recordCount * recordSize;
|
||||
recordData.resize(totalRecordSize);
|
||||
std::memcpy(recordData.data(), recordStart, totalRecordSize);
|
||||
|
||||
// Copy string block
|
||||
const uint8_t* stringStart = recordStart + totalRecordSize;
|
||||
stringBlock.resize(stringBlockSize);
|
||||
if (stringBlockSize > 0) {
|
||||
std::memcpy(stringBlock.data(), stringStart, stringBlockSize);
|
||||
}
|
||||
|
||||
loaded = true;
|
||||
idCacheBuilt = false;
|
||||
idToIndexCache.clear();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
const uint8_t* DBCFile::getRecord(uint32_t index) const {
|
||||
if (!loaded || index >= recordCount) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return recordData.data() + (index * recordSize);
|
||||
}
|
||||
|
||||
uint32_t DBCFile::getUInt32(uint32_t recordIndex, uint32_t fieldIndex) const {
|
||||
if (!loaded || recordIndex >= recordCount || fieldIndex >= fieldCount) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const uint8_t* record = getRecord(recordIndex);
|
||||
if (!record) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const uint32_t* field = reinterpret_cast<const uint32_t*>(record + (fieldIndex * 4));
|
||||
return *field;
|
||||
}
|
||||
|
||||
// Read a field as a signed 32-bit integer (same bit pattern as getUInt32).
int32_t DBCFile::getInt32(uint32_t recordIndex, uint32_t fieldIndex) const {
    return static_cast<int32_t>(getUInt32(recordIndex, fieldIndex));
}
|
||||
|
||||
float DBCFile::getFloat(uint32_t recordIndex, uint32_t fieldIndex) const {
|
||||
if (!loaded || recordIndex >= recordCount || fieldIndex >= fieldCount) {
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
const uint8_t* record = getRecord(recordIndex);
|
||||
if (!record) {
|
||||
return 0.0f;
|
||||
}
|
||||
|
||||
const float* field = reinterpret_cast<const float*>(record + (fieldIndex * 4));
|
||||
return *field;
|
||||
}
|
||||
|
||||
// Resolve a string field: the field value is a byte offset into the
// string block, not the string itself.
std::string DBCFile::getString(uint32_t recordIndex, uint32_t fieldIndex) const {
    uint32_t offset = getUInt32(recordIndex, fieldIndex);
    return getStringByOffset(offset);
}
|
||||
|
||||
std::string DBCFile::getStringByOffset(uint32_t offset) const {
|
||||
if (!loaded || offset >= stringBlockSize) {
|
||||
return "";
|
||||
}
|
||||
|
||||
// Find null terminator
|
||||
const char* str = reinterpret_cast<const char*>(stringBlock.data() + offset);
|
||||
const char* end = reinterpret_cast<const char*>(stringBlock.data() + stringBlockSize);
|
||||
|
||||
// Find string length (up to null terminator or end of block)
|
||||
size_t length = 0;
|
||||
while (str + length < end && str[length] != '\0') {
|
||||
length++;
|
||||
}
|
||||
|
||||
return std::string(str, length);
|
||||
}
|
||||
|
||||
int32_t DBCFile::findRecordById(uint32_t id) const {
|
||||
if (!loaded) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Build ID cache if not already built
|
||||
if (!idCacheBuilt) {
|
||||
buildIdCache();
|
||||
}
|
||||
|
||||
auto it = idToIndexCache.find(id);
|
||||
if (it != idToIndexCache.end()) {
|
||||
return static_cast<int32_t>(it->second);
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Populate the id -> record-index map used by findRecordById().
// Assumes the first field of every record is its ID; if two records share
// an ID, the later record wins (map insert overwrites).
void DBCFile::buildIdCache() const {
    idToIndexCache.clear();

    for (uint32_t i = 0; i < recordCount; i++) {
        uint32_t id = getUInt32(i, 0); // Assume first field is ID
        idToIndexCache[id] = i;
    }

    idCacheBuilt = true;
    LOG_DEBUG("Built DBC ID cache with ", idToIndexCache.size(), " entries");
}
|
||||
|
||||
} // namespace pipeline
|
||||
} // namespace wowee
|
||||
744
src/pipeline/m2_loader.cpp
Normal file
744
src/pipeline/m2_loader.cpp
Normal file
|
|
@ -0,0 +1,744 @@
|
|||
/**
|
||||
* M2 Model Loader — Binary parser for WoW's M2 model format (WotLK 3.3.5a)
|
||||
*
|
||||
* M2 files contain skeletal-animated meshes used for characters, creatures,
|
||||
* and doodads. The format stores geometry, bones with animation tracks,
|
||||
* textures, and material batches. A companion .skin file holds the rendering
|
||||
* batches and submesh definitions.
|
||||
*
|
||||
* Key format details:
|
||||
* - On-disk bone struct is 88 bytes (includes 3 animation track headers).
|
||||
* - Animation tracks use an "array-of-arrays" indirection: the header points
|
||||
* to N sub-array headers, each being {uint32 count, uint32 offset}.
|
||||
* - Rotation tracks store compressed quaternions as int16[4], decoded with
|
||||
* an offset mapping (not simple division).
|
||||
* - Skin file indices use two-level indirection: triangle → vertex lookup
|
||||
* table → global vertex index.
|
||||
* - Skin batch struct is 24 bytes on disk — the geosetIndex field at offset 10
|
||||
* is easily missed, causing a 2-byte alignment shift on all subsequent fields.
|
||||
*
|
||||
* Reference: https://wowdev.wiki/M2
|
||||
*/
|
||||
#include "pipeline/m2_loader.hpp"

#include "core/logger.hpp"

#include <algorithm>
#include <cstdint>
#include <cstring>
|
||||
|
||||
namespace wowee {
|
||||
namespace pipeline {
|
||||
|
||||
namespace {
|
||||
|
||||
// ---------------------------------------------------------------------------
// On-disk structures. These mirror the file layout byte-for-byte — do not
// reorder, resize, or add fields. Sizes noted in comments are the on-disk
// sizes the parser's offset arithmetic relies on.
// ---------------------------------------------------------------------------

// M2 file header structure (version 260+ for WotLK 3.3.5a).
// Each n*/ofs* pair is an array descriptor: element count plus byte offset
// from the start of the M2 file.
struct M2Header {
    char magic[4]; // 'MD20'
    uint32_t version;
    uint32_t nameLength;       // includes the trailing NUL (stripped by readString)
    uint32_t nameOffset;
    uint32_t globalFlags;

    uint32_t nGlobalSequences;
    uint32_t ofsGlobalSequences;
    uint32_t nAnimations;
    uint32_t ofsAnimations;
    uint32_t nAnimationLookup;
    uint32_t ofsAnimationLookup;

    uint32_t nBones;
    uint32_t ofsBones;
    uint32_t nKeyBoneLookup;
    uint32_t ofsKeyBoneLookup;

    uint32_t nVertices;
    uint32_t ofsVertices;
    uint32_t nViews; // Number of skin files (no offset — skins live in .skin files)

    uint32_t nColors;
    uint32_t ofsColors;
    uint32_t nTextures;
    uint32_t ofsTextures;

    uint32_t nTransparency;
    uint32_t ofsTransparency;
    uint32_t nUVAnimation;
    uint32_t ofsUVAnimation;
    uint32_t nTexReplace;
    uint32_t ofsTexReplace;

    uint32_t nRenderFlags;
    uint32_t ofsRenderFlags;
    uint32_t nBoneLookupTable;
    uint32_t ofsBoneLookupTable;
    uint32_t nTexLookup;
    uint32_t ofsTexLookup;

    uint32_t nTexUnits;
    uint32_t ofsTexUnits;
    uint32_t nTransLookup;
    uint32_t ofsTransLookup;
    uint32_t nUVAnimLookup;
    uint32_t ofsUVAnimLookup;

    float vertexBox[6]; // Bounding box (presumably min xyz then max xyz — mirrors boundingBox)
    float vertexRadius;
    float boundingBox[6];      // min xyz (0..2), max xyz (3..5) — consumed by M2Loader::load
    float boundingRadius;

    uint32_t nBoundingTriangles;
    uint32_t ofsBoundingTriangles;
    uint32_t nBoundingVertices;
    uint32_t ofsBoundingVertices;
    uint32_t nBoundingNormals;
    uint32_t ofsBoundingNormals;

    uint32_t nAttachments;
    uint32_t ofsAttachments;
    uint32_t nAttachmentLookup;
    uint32_t ofsAttachmentLookup;
};

// M2 vertex structure (on-disk format, 48 bytes).
// Bone weights/indices are 4 bytes each; two UV channels.
struct M2VertexDisk {
    float pos[3];
    uint8_t boneWeights[4];
    uint8_t boneIndices[4];
    float normal[3];
    float texCoords[2][2];
};

// M2 animation track header (on-disk, 20 bytes).
// nTimestamps/ofsTimestamps point at per-sequence sub-array headers
// ({count, offset} pairs), not directly at keyframe data — see parseAnimTrack.
struct M2TrackDisk {
    uint16_t interpolationType;
    int16_t globalSequence;    // -1 when the track is not driven by a global sequence
    uint32_t nTimestamps;
    uint32_t ofsTimestamps;
    uint32_t nKeys;
    uint32_t ofsKeys;
};

// Full M2 bone structure (on-disk, 88 bytes).
// The three embedded 20-byte track headers are what make the struct 88 bytes;
// a smaller struct here would shift every subsequent bone.
struct M2BoneDisk {
    int32_t keyBoneId;         // 4
    uint32_t flags;            // 4
    int16_t parentBone;        // 2
    uint16_t submeshId;        // 2
    uint32_t boneNameCRC;      // 4
    M2TrackDisk translation;   // 20
    M2TrackDisk rotation;      // 20
    M2TrackDisk scale;         // 20
    float pivot[3];            // 12
}; // Total: 88

// M2 animation sequence structure (on-disk).
// `flags` matters for parsing: sequences WITHOUT flag 0x20 keep their
// keyframes in external .anim files (see parseAnimTrack).
struct M2SequenceDisk {
    uint16_t id;
    uint16_t variationIndex;
    uint32_t duration;
    float movingSpeed;
    uint32_t flags;
    int16_t frequency;
    uint16_t padding;
    uint32_t replayMin;
    uint32_t replayMax;
    uint32_t blendTime;
    float bounds[6];           // min xyz (0..2), max xyz (3..5)
    float boundRadius;
    int16_t nextAnimation;
    uint16_t aliasNext;
};

// M2 texture definition. nameOffset/nameLength reference the BLP filename
// inside the M2 file (empty for hardcoded/replaceable texture types).
struct M2TextureDisk {
    uint32_t type;
    uint32_t flags;
    uint32_t nameLength;
    uint32_t nameOffset;
};

// Skin (.skin) file header — contains the rendering batches and submeshes.
// All offsets here are relative to the start of the .skin file, not the M2.
struct M2SkinHeader {
    char magic[4]; // 'SKIN'
    uint32_t nIndices;
    uint32_t ofsIndices;
    uint32_t nTriangles;
    uint32_t ofsTriangles;
    uint32_t nVertexProperties;
    uint32_t ofsVertexProperties;
    uint32_t nSubmeshes;
    uint32_t ofsSubmeshes;
    uint32_t nBatches;
    uint32_t ofsBatches;
    uint32_t nBones;
};

// Skin submesh structure (48 bytes for WotLK).
struct M2SkinSubmesh {
    uint16_t id;
    uint16_t level;
    uint16_t vertexStart;
    uint16_t vertexCount;
    uint16_t indexStart;
    uint16_t indexCount;
    uint16_t boneCount;
    uint16_t boneStart;
    uint16_t boneInfluences;
    uint16_t centerBoneIndex;
    float centerPosition[3];
    float sortCenterPosition[3];
    float sortRadius;
};

// Skin batch structure (24 bytes on disk).
// NOTE: geosetIndex at byte offset 10 is easily missed — omitting it shifts
// every later field by 2 bytes (see the file header comment).
struct M2BatchDisk {
    uint8_t flags;
    int8_t priorityPlane;
    uint16_t shader;
    uint16_t skinSectionIndex;
    uint16_t geosetIndex; // Geoset index (not same as submesh ID)
    uint16_t colorIndex;
    uint16_t materialIndex;
    uint16_t materialLayer;
    uint16_t textureCount;
    uint16_t textureComboIndex; // Index into texture lookup table
    uint16_t textureCoordIndex; // Texture coordinate combo index
    uint16_t textureWeightIndex; // Transparency lookup index
    uint16_t textureTransformIndex; // Texture animation lookup index
};

// Compressed quaternion (on-disk) for rotation tracks: int16 per component,
// decoded with the offset mapping in parseAnimTrack (not simple division).
struct CompressedQuat {
    int16_t x, y, z, w;
};

// M2 attachment point (on-disk). The trailing animation track is not
// parsed — it is kept only to preserve the struct's on-disk stride.
struct M2AttachmentDisk {
    uint32_t id;
    uint16_t bone;
    uint16_t unknown;
    float position[3];
    uint8_t trackData[20]; // M2Track<uint8_t> — skip
};
|
||||
|
||||
// Safely read one trivially-copyable T from `data` at `offset`.
// Returns a value-initialized T when the read would run past the buffer.
template<typename T>
T readValue(const std::vector<uint8_t>& data, uint32_t offset) {
    T value{};
    if (offset + sizeof(T) <= data.size()) {
        std::memcpy(&value, data.data() + offset, sizeof(T));
    }
    return value;
}
|
||||
|
||||
// Safely read `count` trivially-copyable T elements starting at `offset`.
// Returns an empty vector when count is zero or the span exceeds the buffer.
template<typename T>
std::vector<T> readArray(const std::vector<uint8_t>& data, uint32_t offset, uint32_t count) {
    if (count == 0 || offset + count * sizeof(T) > data.size()) {
        return {};
    }

    std::vector<T> out(count);
    std::memcpy(out.data(), data.data() + offset, count * sizeof(T));
    return out;
}
|
||||
|
||||
// Read a fixed-length string from the file buffer, trimming trailing NULs
// (M2 nameLength counts the terminator). Returns "" on out-of-range reads.
std::string readString(const std::vector<uint8_t>& data, uint32_t offset, uint32_t length) {
    // Sum in 64 bits: `offset + length` is uint32 arithmetic and can wrap
    // for hostile headers (e.g. 0xFFFFFFF0 + 0x20), which would slip past
    // this bounds check and index far outside the buffer below.
    if (static_cast<uint64_t>(offset) + length > data.size()) {
        return "";
    }

    // Strip trailing null bytes (M2 nameLength includes \0)
    while (length > 0 && data[offset + length - 1] == 0) {
        length--;
    }

    return std::string(reinterpret_cast<const char*>(&data[offset]), length);
}
|
||||
|
||||
// Which on-disk key encoding a track uses: float[3] values (translation,
// scale) or compressed int16[4] quaternions (rotation).
enum class TrackType { VEC3, QUAT_COMPRESSED };

// Parse an M2 animation track from the binary data.
//
// Layout (WotLK): the track header's nTimestamps/ofsTimestamps do NOT point
// at keyframe data directly. They point at one {uint32 count, uint32 offset}
// sub-array header per animation sequence ("array of arrays"); the same goes
// for nKeys/ofsKeys. Sub-array i holds the keyframes for sequence i.
//
// sequenceFlags: per-sequence flags from the M2 sequence table. Sequences
// WITHOUT flag 0x20 store their keyframe data in external .anim files, so
// their sub-array offsets are .anim-relative and must be skipped when
// reading from the M2 file (their track.sequences[i] entries stay empty).
//
// All offsets are validated against data.size() before reading; a sub-array
// that fails validation is silently left empty rather than aborting the
// whole track.
void parseAnimTrack(const std::vector<uint8_t>& data,
                    const M2TrackDisk& disk,
                    M2AnimationTrack& track,
                    TrackType type,
                    const std::vector<uint32_t>& sequenceFlags = {}) {
    track.interpolationType = disk.interpolationType;
    track.globalSequence = disk.globalSequence;

    // Track with no per-sequence data at all — nothing further to read.
    if (disk.nTimestamps == 0 || disk.nKeys == 0) return;

    uint32_t numSubArrays = disk.nTimestamps;
    track.sequences.resize(numSubArrays);

    for (uint32_t i = 0; i < numSubArrays; i++) {
        // Sequences without flag 0x20 have their animation data in external .anim files.
        // Their sub-array offsets are .anim-file-relative, not M2-relative, so reading
        // from the M2 file would produce garbage data.
        if (i < sequenceFlags.size() && !(sequenceFlags[i] & 0x20)) continue;
        // Each sub-array header is {uint32_t count, uint32_t offset} = 8 bytes
        uint32_t tsHeaderOfs = disk.ofsTimestamps + i * 8;
        uint32_t keyHeaderOfs = disk.ofsKeys + i * 8;

        if (tsHeaderOfs + 8 > data.size() || keyHeaderOfs + 8 > data.size()) continue;

        uint32_t tsCount = readValue<uint32_t>(data, tsHeaderOfs);
        uint32_t tsOffset = readValue<uint32_t>(data, tsHeaderOfs + 4);
        uint32_t keyCount = readValue<uint32_t>(data, keyHeaderOfs);
        uint32_t keyOffset = readValue<uint32_t>(data, keyHeaderOfs + 4);

        if (tsCount == 0 || keyCount == 0) continue;

        // Validate offsets are within file data (external .anim files have out-of-range offsets)
        if (tsOffset + tsCount * sizeof(uint32_t) > data.size()) continue;

        // Read timestamps (milliseconds within the sequence)
        auto timestamps = readArray<uint32_t>(data, tsOffset, tsCount);
        track.sequences[i].timestamps = std::move(timestamps);

        // Validate key data offset; if the values are unreadable, also drop
        // the timestamps so the sub-array never ends up half-populated.
        size_t keyElementSize = (type == TrackType::VEC3) ? sizeof(float) * 3 : sizeof(int16_t) * 4;
        if (keyOffset + keyCount * keyElementSize > data.size()) {
            track.sequences[i].timestamps.clear();
            continue;
        }

        // Read key values
        if (type == TrackType::VEC3) {
            // Translation/scale: float[3] per key
            struct Vec3Disk { float x, y, z; };
            auto values = readArray<Vec3Disk>(data, keyOffset, keyCount);
            track.sequences[i].vec3Values.reserve(values.size());
            for (const auto& v : values) {
                track.sequences[i].vec3Values.emplace_back(v.x, v.y, v.z);
            }
        } else {
            // Rotation: compressed quaternion int16[4] per key
            auto compressed = readArray<CompressedQuat>(data, keyOffset, keyCount);
            track.sequences[i].quatValues.reserve(compressed.size());
            for (const auto& cq : compressed) {
                // M2 compressed quaternion: offset mapping, NOT simple division.
                // Negative int16s map up by 32768, non-negatives down by 32767,
                // so both halves land in [-1, 1].
                float fx = (cq.x < 0) ? (cq.x + 32768) / 32767.0f : (cq.x - 32767) / 32767.0f;
                float fy = (cq.y < 0) ? (cq.y + 32768) / 32767.0f : (cq.y - 32767) / 32767.0f;
                float fz = (cq.z < 0) ? (cq.z + 32768) / 32767.0f : (cq.z - 32767) / 32767.0f;
                float fw = (cq.w < 0) ? (cq.w + 32768) / 32767.0f : (cq.w - 32767) / 32767.0f;
                // M2 on-disk: (x,y,z,w), GLM quat constructor: (w,x,y,z)
                glm::quat q(fw, fx, fy, fz);
                // Renormalize: quantization error leaves the quaternion slightly
                // off unit length; degenerate (near-zero) quats become identity.
                float len = glm::length(q);
                if (len > 0.001f) {
                    q = q / len;
                } else {
                    q = glm::quat(1.0f, 0.0f, 0.0f, 0.0f); // identity
                }
                track.sequences[i].quatValues.push_back(q);
            }
        }
    }
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
M2Model M2Loader::load(const std::vector<uint8_t>& m2Data) {
|
||||
M2Model model;
|
||||
|
||||
if (m2Data.size() < sizeof(M2Header)) {
|
||||
core::Logger::getInstance().error("M2 data too small");
|
||||
return model;
|
||||
}
|
||||
|
||||
// Read header
|
||||
M2Header header;
|
||||
std::memcpy(&header, m2Data.data(), sizeof(M2Header));
|
||||
|
||||
// Verify magic
|
||||
if (std::strncmp(header.magic, "MD20", 4) != 0) {
|
||||
core::Logger::getInstance().error("Invalid M2 magic: expected MD20");
|
||||
return model;
|
||||
}
|
||||
|
||||
core::Logger::getInstance().debug("Loading M2 model (version ", header.version, ")");
|
||||
|
||||
// Read model name
|
||||
if (header.nameLength > 0 && header.nameOffset > 0) {
|
||||
model.name = readString(m2Data, header.nameOffset, header.nameLength);
|
||||
}
|
||||
|
||||
model.version = header.version;
|
||||
model.globalFlags = header.globalFlags;
|
||||
|
||||
// Bounding box
|
||||
model.boundMin = glm::vec3(header.boundingBox[0], header.boundingBox[1], header.boundingBox[2]);
|
||||
model.boundMax = glm::vec3(header.boundingBox[3], header.boundingBox[4], header.boundingBox[5]);
|
||||
model.boundRadius = header.boundingRadius;
|
||||
|
||||
// Read vertices
|
||||
if (header.nVertices > 0 && header.ofsVertices > 0) {
|
||||
auto diskVerts = readArray<M2VertexDisk>(m2Data, header.ofsVertices, header.nVertices);
|
||||
model.vertices.reserve(diskVerts.size());
|
||||
|
||||
for (const auto& dv : diskVerts) {
|
||||
M2Vertex v;
|
||||
v.position = glm::vec3(dv.pos[0], dv.pos[1], dv.pos[2]);
|
||||
std::memcpy(v.boneWeights, dv.boneWeights, 4);
|
||||
std::memcpy(v.boneIndices, dv.boneIndices, 4);
|
||||
v.normal = glm::vec3(dv.normal[0], dv.normal[1], dv.normal[2]);
|
||||
v.texCoords[0] = glm::vec2(dv.texCoords[0][0], dv.texCoords[0][1]);
|
||||
v.texCoords[1] = glm::vec2(dv.texCoords[1][0], dv.texCoords[1][1]);
|
||||
model.vertices.push_back(v);
|
||||
}
|
||||
|
||||
core::Logger::getInstance().debug(" Vertices: ", model.vertices.size());
|
||||
}
|
||||
|
||||
// Read animation sequences (needed before bones to know sequence count)
|
||||
if (header.nAnimations > 0 && header.ofsAnimations > 0) {
|
||||
auto diskSeqs = readArray<M2SequenceDisk>(m2Data, header.ofsAnimations, header.nAnimations);
|
||||
model.sequences.reserve(diskSeqs.size());
|
||||
|
||||
for (const auto& ds : diskSeqs) {
|
||||
M2Sequence seq;
|
||||
seq.id = ds.id;
|
||||
seq.variationIndex = ds.variationIndex;
|
||||
seq.duration = ds.duration;
|
||||
seq.movingSpeed = ds.movingSpeed;
|
||||
seq.flags = ds.flags;
|
||||
seq.frequency = ds.frequency;
|
||||
seq.replayMin = ds.replayMin;
|
||||
seq.replayMax = ds.replayMax;
|
||||
seq.blendTime = ds.blendTime;
|
||||
seq.boundMin = glm::vec3(ds.bounds[0], ds.bounds[1], ds.bounds[2]);
|
||||
seq.boundMax = glm::vec3(ds.bounds[3], ds.bounds[4], ds.bounds[5]);
|
||||
seq.boundRadius = ds.boundRadius;
|
||||
seq.nextAnimation = ds.nextAnimation;
|
||||
seq.aliasNext = ds.aliasNext;
|
||||
|
||||
model.sequences.push_back(seq);
|
||||
}
|
||||
|
||||
core::Logger::getInstance().debug(" Animation sequences: ", model.sequences.size());
|
||||
}
|
||||
|
||||
// Read bones with full animation track data
|
||||
if (header.nBones > 0 && header.ofsBones > 0) {
|
||||
// Verify we have enough data for the full bone structures
|
||||
uint32_t expectedBoneSize = header.nBones * sizeof(M2BoneDisk);
|
||||
if (header.ofsBones + expectedBoneSize > m2Data.size()) {
|
||||
core::Logger::getInstance().warning("M2 bone data extends beyond file, loading with fallback");
|
||||
}
|
||||
|
||||
model.bones.reserve(header.nBones);
|
||||
int bonesWithKeyframes = 0;
|
||||
|
||||
// Build per-sequence flags to skip external-data sequences during M2 parse
|
||||
std::vector<uint32_t> seqFlags;
|
||||
seqFlags.reserve(model.sequences.size());
|
||||
for (const auto& seq : model.sequences) {
|
||||
seqFlags.push_back(seq.flags);
|
||||
}
|
||||
|
||||
for (uint32_t boneIdx = 0; boneIdx < header.nBones; boneIdx++) {
|
||||
uint32_t boneOffset = header.ofsBones + boneIdx * sizeof(M2BoneDisk);
|
||||
if (boneOffset + sizeof(M2BoneDisk) > m2Data.size()) {
|
||||
// Fallback: create identity bone
|
||||
M2Bone bone;
|
||||
bone.keyBoneId = -1;
|
||||
bone.flags = 0;
|
||||
bone.parentBone = -1;
|
||||
bone.submeshId = 0;
|
||||
bone.pivot = glm::vec3(0.0f);
|
||||
model.bones.push_back(bone);
|
||||
continue;
|
||||
}
|
||||
|
||||
M2BoneDisk db = readValue<M2BoneDisk>(m2Data, boneOffset);
|
||||
|
||||
M2Bone bone;
|
||||
bone.keyBoneId = db.keyBoneId;
|
||||
bone.flags = db.flags;
|
||||
bone.parentBone = db.parentBone;
|
||||
bone.submeshId = db.submeshId;
|
||||
bone.pivot = glm::vec3(db.pivot[0], db.pivot[1], db.pivot[2]);
|
||||
|
||||
// Parse animation tracks (skip sequences with external .anim data)
|
||||
parseAnimTrack(m2Data, db.translation, bone.translation, TrackType::VEC3, seqFlags);
|
||||
parseAnimTrack(m2Data, db.rotation, bone.rotation, TrackType::QUAT_COMPRESSED, seqFlags);
|
||||
parseAnimTrack(m2Data, db.scale, bone.scale, TrackType::VEC3, seqFlags);
|
||||
|
||||
if (bone.translation.hasData() || bone.rotation.hasData() || bone.scale.hasData()) {
|
||||
bonesWithKeyframes++;
|
||||
}
|
||||
|
||||
model.bones.push_back(bone);
|
||||
}
|
||||
|
||||
core::Logger::getInstance().debug(" Bones: ", model.bones.size(),
|
||||
" (", bonesWithKeyframes, " with keyframes)");
|
||||
}
|
||||
|
||||
// Read textures
|
||||
if (header.nTextures > 0 && header.ofsTextures > 0) {
|
||||
auto diskTextures = readArray<M2TextureDisk>(m2Data, header.ofsTextures, header.nTextures);
|
||||
model.textures.reserve(diskTextures.size());
|
||||
|
||||
for (const auto& dt : diskTextures) {
|
||||
M2Texture tex;
|
||||
tex.type = dt.type;
|
||||
tex.flags = dt.flags;
|
||||
|
||||
if (dt.nameLength > 0 && dt.nameOffset > 0) {
|
||||
tex.filename = readString(m2Data, dt.nameOffset, dt.nameLength);
|
||||
}
|
||||
|
||||
model.textures.push_back(tex);
|
||||
}
|
||||
|
||||
core::Logger::getInstance().debug(" Textures: ", model.textures.size());
|
||||
}
|
||||
|
||||
// Read texture lookup
|
||||
if (header.nTexLookup > 0 && header.ofsTexLookup > 0) {
|
||||
model.textureLookup = readArray<uint16_t>(m2Data, header.ofsTexLookup, header.nTexLookup);
|
||||
}
|
||||
|
||||
// Read attachment points
|
||||
if (header.nAttachments > 0 && header.ofsAttachments > 0) {
|
||||
auto diskAttachments = readArray<M2AttachmentDisk>(m2Data, header.ofsAttachments, header.nAttachments);
|
||||
model.attachments.reserve(diskAttachments.size());
|
||||
for (const auto& da : diskAttachments) {
|
||||
M2Attachment att;
|
||||
att.id = da.id;
|
||||
att.bone = da.bone;
|
||||
att.position = glm::vec3(da.position[0], da.position[1], da.position[2]);
|
||||
model.attachments.push_back(att);
|
||||
}
|
||||
core::Logger::getInstance().debug(" Attachments: ", model.attachments.size());
|
||||
}
|
||||
|
||||
// Read attachment lookup
|
||||
if (header.nAttachmentLookup > 0 && header.ofsAttachmentLookup > 0) {
|
||||
model.attachmentLookup = readArray<uint16_t>(m2Data, header.ofsAttachmentLookup, header.nAttachmentLookup);
|
||||
}
|
||||
|
||||
core::Logger::getInstance().debug("M2 model loaded: ", model.name);
|
||||
return model;
|
||||
}
|
||||
|
||||
// Loads a .skin companion file into an already-parsed M2 model.
// The skin file carries the render data for one LOD: a vertex lookup
// table, triangle indices (which index the lookup table, not the
// vertex array directly), skin sections (submeshes), and render
// batches. Vertices themselves live in the main M2 file and must be
// loaded before this is called.
// Returns false when the data is too small or the magic is wrong.
bool M2Loader::loadSkin(const std::vector<uint8_t>& skinData, M2Model& model) {
    if (skinData.size() < sizeof(M2SkinHeader)) {
        core::Logger::getInstance().error("Skin data too small");
        return false;
    }

    // Read skin header
    M2SkinHeader header;
    std::memcpy(&header, skinData.data(), sizeof(M2SkinHeader));

    // Verify magic
    if (std::strncmp(header.magic, "SKIN", 4) != 0) {
        core::Logger::getInstance().error("Invalid skin magic: expected SKIN");
        return false;
    }

    core::Logger::getInstance().debug("Loading M2 skin file");

    // Read vertex lookup table (maps skin-local indices to global vertex indices)
    std::vector<uint16_t> vertexLookup;
    if (header.nIndices > 0 && header.ofsIndices > 0) {
        vertexLookup = readArray<uint16_t>(skinData, header.ofsIndices, header.nIndices);
    }

    // Read triangle indices (indices into the vertex lookup table)
    std::vector<uint16_t> triangles;
    if (header.nTriangles > 0 && header.ofsTriangles > 0) {
        triangles = readArray<uint16_t>(skinData, header.ofsTriangles, header.nTriangles);
    }

    // Resolve two-level indirection: triangle index -> lookup table -> global vertex.
    // Out-of-bounds entries are clamped to vertex 0 (rather than dropped) so the
    // index count stays a multiple of 3 and triangle boundaries are preserved.
    model.indices.clear();
    model.indices.reserve(triangles.size());
    uint32_t outOfBounds = 0;
    for (uint16_t triIdx : triangles) {
        if (triIdx < vertexLookup.size()) {
            uint16_t globalIdx = vertexLookup[triIdx];
            if (globalIdx < model.vertices.size()) {
                model.indices.push_back(globalIdx);
            } else {
                model.indices.push_back(0);
                outOfBounds++;
            }
        } else {
            model.indices.push_back(0);
            outOfBounds++;
        }
    }
    core::Logger::getInstance().debug("  Resolved ", model.indices.size(), " final indices");
    if (outOfBounds > 0) {
        core::Logger::getInstance().warning("  ", outOfBounds, " out-of-bounds indices clamped to 0");
    }

    // Read submeshes (proper vertex/index ranges)
    std::vector<M2SkinSubmesh> submeshes;
    if (header.nSubmeshes > 0 && header.ofsSubmeshes > 0) {
        submeshes = readArray<M2SkinSubmesh>(skinData, header.ofsSubmeshes, header.nSubmeshes);
        core::Logger::getInstance().debug("  Submeshes: ", submeshes.size());
        for (size_t i = 0; i < submeshes.size(); i++) {
            const auto& sm = submeshes[i];
            core::Logger::getInstance().info("    SkinSection[", i, "]: id=", sm.id,
                " level=", sm.level,
                " vtxStart=", sm.vertexStart, " vtxCount=", sm.vertexCount,
                " idxStart=", sm.indexStart, " idxCount=", sm.indexCount,
                " boneCount=", sm.boneCount, " boneStart=", sm.boneStart);
        }
    }

    // Read batches with proper submesh references.
    // Each disk batch references a skin section by index; the section
    // supplies the vertex/index ranges the renderer should draw.
    if (header.nBatches > 0 && header.ofsBatches > 0) {
        auto diskBatches = readArray<M2BatchDisk>(skinData, header.ofsBatches, header.nBatches);
        model.batches.clear();
        model.batches.reserve(diskBatches.size());

        for (size_t i = 0; i < diskBatches.size(); i++) {
            const auto& db = diskBatches[i];
            M2Batch batch;

            batch.flags = db.flags;
            batch.priorityPlane = db.priorityPlane;
            batch.shader = db.shader;
            batch.skinSectionIndex = db.skinSectionIndex;
            batch.colorIndex = db.colorIndex;
            batch.materialIndex = db.materialIndex;
            batch.materialLayer = db.materialLayer;
            batch.textureCount = db.textureCount;
            batch.textureIndex = db.textureComboIndex;
            batch.textureUnit = db.textureCoordIndex;
            batch.transparencyIndex = db.textureWeightIndex;
            batch.textureAnimIndex = db.textureTransformIndex;

            // Look up proper vertex/index ranges from submesh
            if (db.skinSectionIndex < submeshes.size()) {
                const auto& sm = submeshes[db.skinSectionIndex];
                batch.indexStart = sm.indexStart;
                batch.indexCount = sm.indexCount;
                batch.vertexStart = sm.vertexStart;
                batch.vertexCount = sm.vertexCount;
                batch.submeshId = sm.id;
                batch.submeshLevel = sm.level;
            } else {
                // Fallback: render entire model as one batch
                batch.indexStart = 0;
                batch.indexCount = model.indices.size();
                batch.vertexStart = 0;
                batch.vertexCount = model.vertices.size();
            }

            model.batches.push_back(batch);
        }

        core::Logger::getInstance().debug("  Batches: ", model.batches.size());
    }

    return true;
}
|
||||
|
||||
// Patches bone animation tracks for one sequence using data from an
// external .anim file. Some M2 sequences are stored externally: the
// per-sequence sub-array headers (count + offset pairs) remain in the
// main M2 file, but their offsets point into the companion .anim
// file's data. Only tracks that are still empty for this sequence
// (i.e. not already loaded from the main file) are filled in.
//
// m2Data        - the full main .m2 file (bone/track headers are read from it)
// animData      - the raw .anim file for this sequence
// sequenceIndex - index into model.sequences / the per-sequence sub-arrays
// model         - model whose bone tracks are patched in place
void M2Loader::loadAnimFile(const std::vector<uint8_t>& m2Data,
                            const std::vector<uint8_t>& animData,
                            uint32_t sequenceIndex,
                            M2Model& model) {
    if (m2Data.size() < sizeof(M2Header) || animData.empty()) return;

    M2Header header;
    std::memcpy(&header, m2Data.data(), sizeof(M2Header));

    if (header.nBones == 0 || header.ofsBones == 0) return;
    if (sequenceIndex >= model.sequences.size()) return;

    int patchedTracks = 0;

    for (uint32_t boneIdx = 0; boneIdx < header.nBones && boneIdx < model.bones.size(); boneIdx++) {
        uint32_t boneOffset = header.ofsBones + boneIdx * sizeof(M2BoneDisk);
        if (boneOffset + sizeof(M2BoneDisk) > m2Data.size()) continue;

        M2BoneDisk db = readValue<M2BoneDisk>(m2Data, boneOffset);
        auto& bone = model.bones[boneIdx];

        // Helper to patch one track for this sequence index
        auto patchTrack = [&](const M2TrackDisk& disk, M2AnimationTrack& track, TrackType type) {
            if (disk.nTimestamps == 0 || disk.nKeys == 0) return;
            if (sequenceIndex >= disk.nTimestamps) return;

            // Ensure track.sequences is large enough
            if (track.sequences.size() <= sequenceIndex) {
                track.sequences.resize(sequenceIndex + 1);
            }

            auto& seqKeys = track.sequences[sequenceIndex];

            // Already has data (loaded from main M2 file)
            if (!seqKeys.timestamps.empty()) return;

            // Read sub-array header for this sequence from the M2 file.
            // Each header entry is 8 bytes: uint32 count + uint32 offset.
            uint32_t tsHeaderOfs = disk.ofsTimestamps + sequenceIndex * 8;
            uint32_t keyHeaderOfs = disk.ofsKeys + sequenceIndex * 8;
            if (tsHeaderOfs + 8 > m2Data.size() || keyHeaderOfs + 8 > m2Data.size()) return;

            uint32_t tsCount = readValue<uint32_t>(m2Data, tsHeaderOfs);
            uint32_t tsOffset = readValue<uint32_t>(m2Data, tsHeaderOfs + 4);
            uint32_t keyCount = readValue<uint32_t>(m2Data, keyHeaderOfs);
            uint32_t keyOffset = readValue<uint32_t>(m2Data, keyHeaderOfs + 4);

            if (tsCount == 0 || keyCount == 0) return;

            // These offsets point into the .anim file data
            if (tsOffset + tsCount * sizeof(uint32_t) > animData.size()) return;

            // VEC3 keys are 3 floats; compressed quaternions are 4 int16s.
            size_t keyElementSize = (type == TrackType::VEC3) ? sizeof(float) * 3 : sizeof(int16_t) * 4;
            if (keyOffset + keyCount * keyElementSize > animData.size()) return;

            // Read timestamps from .anim data
            auto timestamps = readArray<uint32_t>(animData, tsOffset, tsCount);
            seqKeys.timestamps = std::move(timestamps);

            // Read key values from .anim data
            if (type == TrackType::VEC3) {
                struct Vec3Disk { float x, y, z; };
                auto values = readArray<Vec3Disk>(animData, keyOffset, keyCount);
                seqKeys.vec3Values.reserve(values.size());
                for (const auto& v : values) {
                    seqKeys.vec3Values.emplace_back(v.x, v.y, v.z);
                }
            } else {
                // Decompress each int16 quaternion component to roughly
                // [-1, 1]: negatives are shifted up by 32768, positives
                // down by 32767, then divided by 32767.
                auto compressed = readArray<CompressedQuat>(animData, keyOffset, keyCount);
                seqKeys.quatValues.reserve(compressed.size());
                for (const auto& cq : compressed) {
                    float fx = (cq.x < 0) ? (cq.x + 32768) / 32767.0f : (cq.x - 32767) / 32767.0f;
                    float fy = (cq.y < 0) ? (cq.y + 32768) / 32767.0f : (cq.y - 32767) / 32767.0f;
                    float fz = (cq.z < 0) ? (cq.z + 32768) / 32767.0f : (cq.z - 32767) / 32767.0f;
                    float fw = (cq.w < 0) ? (cq.w + 32768) / 32767.0f : (cq.w - 32767) / 32767.0f;
                    // glm::quat's constructor takes (w, x, y, z).
                    glm::quat q(fw, fx, fy, fz);
                    // Renormalize; fall back to identity for degenerate keys.
                    float len = glm::length(q);
                    if (len > 0.001f) {
                        q = q / len;
                    } else {
                        q = glm::quat(1.0f, 0.0f, 0.0f, 0.0f);
                    }
                    seqKeys.quatValues.push_back(q);
                }
            }
            patchedTracks++;
        };

        patchTrack(db.translation, bone.translation, TrackType::VEC3);
        patchTrack(db.rotation, bone.rotation, TrackType::QUAT_COMPRESSED);
        patchTrack(db.scale, bone.scale, TrackType::VEC3);
    }

    core::Logger::getInstance().info("Loaded .anim for sequence ", sequenceIndex,
        " (id=", model.sequences[sequenceIndex].id, "): patched ", patchedTracks, " bone tracks");
}
|
||||
|
||||
} // namespace pipeline
|
||||
} // namespace wowee
|
||||
358
src/pipeline/mpq_manager.cpp
Normal file
358
src/pipeline/mpq_manager.cpp
Normal file
|
|
@ -0,0 +1,358 @@
|
|||
#include "pipeline/mpq_manager.hpp"
|
||||
#include "core/logger.hpp"
|
||||
#include <algorithm>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
|
||||
#ifdef HAVE_STORMLIB
|
||||
#include <StormLib.h>
|
||||
#endif
|
||||
|
||||
// Define HANDLE and INVALID_HANDLE_VALUE for both cases
|
||||
#ifndef HAVE_STORMLIB
|
||||
typedef void* HANDLE;
|
||||
#endif
|
||||
|
||||
#ifndef INVALID_HANDLE_VALUE
|
||||
#define INVALID_HANDLE_VALUE ((HANDLE)(long long)-1)
|
||||
#endif
|
||||
|
||||
namespace wowee {
|
||||
namespace pipeline {
|
||||
|
||||
// Members are initialized by their in-class initializers; archives are
// opened later by initialize().
MPQManager::MPQManager() = default;

// Closes every open archive handle via shutdown().
MPQManager::~MPQManager() {
    shutdown();
}
|
||||
|
||||
// Opens the WoW client's MPQ archive set rooted at dataPath_.
// Archives are registered in priority tiers (base archives 100,
// patches 150-500, locale archives 250-470) so newer patch content
// shadows older content during lookups.
// Returns false only when the data directory itself is missing; a
// missing individual archive is tolerated (the loose-file fallback in
// readFile() still works). Calling twice is a no-op that returns true.
bool MPQManager::initialize(const std::string& dataPath_) {
    if (initialized) {
        LOG_WARNING("MPQManager already initialized");
        return true;
    }

    dataPath = dataPath_;
    LOG_INFO("Initializing MPQ manager with data path: ", dataPath);

    // Check if data directory exists
    if (!std::filesystem::exists(dataPath)) {
        LOG_ERROR("Data directory does not exist: ", dataPath);
        return false;
    }

#ifdef HAVE_STORMLIB
    // Load base archives (in order of priority)
    std::vector<std::string> baseArchives = {
        "common.MPQ",
        "common-2.MPQ",
        "expansion.MPQ",
        "lichking.MPQ",
    };

    for (const auto& archive : baseArchives) {
        std::string fullPath = dataPath + "/" + archive;
        if (std::filesystem::exists(fullPath)) {
            loadArchive(fullPath, 100); // Base archives have priority 100
        } else {
            LOG_DEBUG("Base archive not found (optional): ", archive);
        }
    }

    // Load patch archives (highest priority)
    loadPatchArchives();

    // Load locale archives
    loadLocaleArchives("enUS"); // TODO: Make configurable

    if (archives.empty()) {
        LOG_WARNING("No MPQ archives loaded - will use loose file fallback");
    } else {
        LOG_INFO("MPQ manager initialized with ", archives.size(), " archives");
    }
#else
    LOG_WARNING("StormLib not available - using loose file fallback only");
#endif

    initialized = true;
    return true;
}
|
||||
|
||||
// Releases every open MPQ archive handle and resets the manager's
// state so initialize() may be called again. Safe to call repeatedly;
// does nothing when not initialized.
void MPQManager::shutdown() {
    if (!initialized) {
        return;
    }

#ifdef HAVE_STORMLIB
    LOG_INFO("Shutting down MPQ manager");
    for (const auto& openArchive : archives) {
        if (openArchive.handle != INVALID_HANDLE_VALUE) {
            SFileCloseArchive(openArchive.handle);
        }
    }
#endif

    archives.clear();
    archiveNames.clear();
    initialized = false;
}
|
||||
|
||||
// Opens an MPQ archive and registers it under the given priority.
// Higher-priority archives shadow lower ones during file lookups, so
// the archive list is re-sorted (highest first) after every insert.
// Returns false when the file is missing, StormLib cannot open it, or
// StormLib support was not compiled in.
//
// Fix: the previous version used back-to-back #ifndef/#ifdef blocks,
// leaving an unreachable trailing `return false;` in both builds and
// unused-parameter warnings in the StormLib-free build. Restructured
// as a single #ifdef/#else with identical behavior.
bool MPQManager::loadArchive(const std::string& path, int priority) {
#ifdef HAVE_STORMLIB
    // Check if file exists
    if (!std::filesystem::exists(path)) {
        LOG_ERROR("Archive file not found: ", path);
        return false;
    }

    HANDLE handle = INVALID_HANDLE_VALUE;
    if (!SFileOpenArchive(path.c_str(), 0, 0, &handle)) {
        LOG_ERROR("Failed to open MPQ archive: ", path);
        return false;
    }

    ArchiveEntry entry;
    entry.handle = handle;
    entry.path = path;
    entry.priority = priority;

    archives.push_back(entry);
    archiveNames.push_back(path);

    // Sort archives by priority (highest first) so lookups hit patch
    // archives before base archives.
    std::sort(archives.begin(), archives.end(),
              [](const ArchiveEntry& a, const ArchiveEntry& b) {
                  return a.priority > b.priority;
              });

    LOG_INFO("Loaded MPQ archive: ", path, " (priority ", priority, ")");
    return true;
#else
    // Silence unused-parameter warnings in the StormLib-free build.
    (void)path;
    (void)priority;
    LOG_ERROR("Cannot load archive - StormLib not available");
    return false;
#endif
}
|
||||
|
||||
bool MPQManager::fileExists(const std::string& filename) const {
|
||||
#ifdef HAVE_STORMLIB
|
||||
// Check MPQ archives first if available
|
||||
if (!archives.empty()) {
|
||||
HANDLE archive = findFileArchive(filename);
|
||||
if (archive != INVALID_HANDLE_VALUE) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
// Fall back to checking for loose file
|
||||
std::string loosePath = filename;
|
||||
std::replace(loosePath.begin(), loosePath.end(), '\\', '/');
|
||||
std::string fullPath = dataPath + "/" + loosePath;
|
||||
return std::filesystem::exists(fullPath);
|
||||
}
|
||||
|
||||
// Reads a file's full contents, searching in order:
//   1. Loaded MPQ archives (highest-priority archive wins)
//   2. A loose file under dataPath using the exact-case path
//   3. A loose file found by case-insensitive per-component search
//      (needed on case-sensitive filesystems such as Linux)
// WoW-style backslash separators are converted to '/'.
// Returns an empty vector when the file cannot be found or read.
std::vector<uint8_t> MPQManager::readFile(const std::string& filename) const {
#ifdef HAVE_STORMLIB
    // Try MPQ archives first if available
    if (!archives.empty()) {
        HANDLE archive = findFileArchive(filename);
        if (archive != INVALID_HANDLE_VALUE) {
            // Open the file
            HANDLE file = INVALID_HANDLE_VALUE;
            if (SFileOpenFileEx(archive, filename.c_str(), 0, &file)) {
                // Get file size
                DWORD fileSize = SFileGetFileSize(file, nullptr);
                if (fileSize > 0 && fileSize != SFILE_INVALID_SIZE) {
                    // Read file data
                    std::vector<uint8_t> data(fileSize);
                    DWORD bytesRead = 0;
                    if (SFileReadFile(file, data.data(), fileSize, &bytesRead, nullptr)) {
                        SFileCloseFile(file);
                        LOG_DEBUG("Read file from MPQ: ", filename, " (", bytesRead, " bytes)");
                        return data;
                    }
                }
                // Read failed or file was empty/invalid; close and fall
                // through to the loose-file fallback.
                SFileCloseFile(file);
            }
        }
    }
#endif

    // Fall back to loose file loading
    // Convert WoW path (backslashes) to filesystem path (forward slashes)
    std::string loosePath = filename;
    std::replace(loosePath.begin(), loosePath.end(), '\\', '/');

    // Try with original case
    std::string fullPath = dataPath + "/" + loosePath;
    if (std::filesystem::exists(fullPath)) {
        std::ifstream file(fullPath, std::ios::binary | std::ios::ate);
        if (file.is_open()) {
            // Opened at end (std::ios::ate) so tellg() yields the size.
            size_t size = file.tellg();
            file.seekg(0, std::ios::beg);
            std::vector<uint8_t> data(size);
            file.read(reinterpret_cast<char*>(data.data()), size);
            LOG_DEBUG("Read loose file: ", loosePath, " (", size, " bytes)");
            return data;
        }
    }

    // Try case-insensitive search (common for Linux)
    // Split the path into components so each directory level can be
    // matched independently.
    std::filesystem::path searchPath = dataPath;
    std::vector<std::string> pathComponents;
    std::istringstream iss(loosePath);
    std::string component;
    while (std::getline(iss, component, '/')) {
        if (!component.empty()) {
            pathComponents.push_back(component);
        }
    }

    // Try to find file with case-insensitive matching, descending one
    // component at a time from the data directory.
    for (const auto& comp : pathComponents) {
        bool found = false;
        if (std::filesystem::exists(searchPath) && std::filesystem::is_directory(searchPath)) {
            for (const auto& entry : std::filesystem::directory_iterator(searchPath)) {
                std::string entryName = entry.path().filename().string();
                // Case-insensitive comparison
                if (std::equal(comp.begin(), comp.end(), entryName.begin(), entryName.end(),
                               [](char a, char b) { return std::tolower(a) == std::tolower(b); })) {
                    searchPath = entry.path();
                    found = true;
                    break;
                }
            }
        }
        if (!found) {
            LOG_WARNING("File not found: ", filename);
            return std::vector<uint8_t>();
        }
    }

    // Try to read the found file
    if (std::filesystem::exists(searchPath) && std::filesystem::is_regular_file(searchPath)) {
        std::ifstream file(searchPath, std::ios::binary | std::ios::ate);
        if (file.is_open()) {
            size_t size = file.tellg();
            file.seekg(0, std::ios::beg);
            std::vector<uint8_t> data(size);
            file.read(reinterpret_cast<char*>(data.data()), size);
            LOG_DEBUG("Read loose file (case-insensitive): ", searchPath.string(), " (", size, " bytes)");
            return data;
        }
    }

    LOG_WARNING("File not found: ", filename);
    return std::vector<uint8_t>();
}
|
||||
|
||||
// Returns the size in bytes of a file stored in the loaded MPQ
// archives, or 0 when the file is absent, its size cannot be read, or
// StormLib support was not compiled in. Loose files are not consulted.
//
// Fix: restructured the previous back-to-back #ifndef/#ifdef blocks
// (which left an unreachable trailing `return 0;` and an unused
// parameter in the StormLib-free build) into one #ifdef/#else with
// identical behavior.
uint32_t MPQManager::getFileSize(const std::string& filename) const {
#ifdef HAVE_STORMLIB
    HANDLE archive = findFileArchive(filename);
    if (archive == INVALID_HANDLE_VALUE) {
        return 0;
    }

    HANDLE file = INVALID_HANDLE_VALUE;
    if (!SFileOpenFileEx(archive, filename.c_str(), 0, &file)) {
        return 0;
    }

    DWORD fileSize = SFileGetFileSize(file, nullptr);
    SFileCloseFile(file);

    return (fileSize == SFILE_INVALID_SIZE) ? 0 : fileSize;
#else
    (void)filename;  // unused in the StormLib-free build
    return 0;
#endif
}
|
||||
|
||||
// Searches the loaded archives — already sorted highest priority
// first by loadArchive() — and returns the handle of the first one
// that contains the given file. Returns INVALID_HANDLE_VALUE when no
// archive has it or StormLib support was not compiled in.
//
// Fix: restructured the previous back-to-back #ifndef/#ifdef blocks
// (which left an unreachable trailing return and an unused parameter
// in the StormLib-free build) into one #ifdef/#else with identical
// behavior.
HANDLE MPQManager::findFileArchive(const std::string& filename) const {
#ifdef HAVE_STORMLIB
    // Search archives in priority order (already sorted)
    for (const auto& entry : archives) {
        if (SFileHasFile(entry.handle, filename.c_str())) {
            return entry.handle;
        }
    }
    return INVALID_HANDLE_VALUE;
#else
    (void)filename;  // unused in the StormLib-free build
    return INVALID_HANDLE_VALUE;
#endif
}
|
||||
|
||||
bool MPQManager::loadPatchArchives() {
|
||||
#ifndef HAVE_STORMLIB
|
||||
return false;
|
||||
#endif
|
||||
|
||||
// WoW 3.3.5a patch archives (in order of priority, highest first)
|
||||
std::vector<std::pair<std::string, int>> patchArchives = {
|
||||
{"patch-5.MPQ", 500},
|
||||
{"patch-4.MPQ", 400},
|
||||
{"patch-3.MPQ", 300},
|
||||
{"patch-2.MPQ", 200},
|
||||
{"patch.MPQ", 150},
|
||||
};
|
||||
|
||||
int loadedPatches = 0;
|
||||
for (const auto& [archive, priority] : patchArchives) {
|
||||
std::string fullPath = dataPath + "/" + archive;
|
||||
if (std::filesystem::exists(fullPath)) {
|
||||
if (loadArchive(fullPath, priority)) {
|
||||
loadedPatches++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LOG_INFO("Loaded ", loadedPatches, " patch archives");
|
||||
return loadedPatches > 0;
|
||||
}
|
||||
|
||||
bool MPQManager::loadLocaleArchives(const std::string& locale) {
|
||||
#ifndef HAVE_STORMLIB
|
||||
return false;
|
||||
#endif
|
||||
|
||||
std::string localePath = dataPath + "/" + locale;
|
||||
if (!std::filesystem::exists(localePath)) {
|
||||
LOG_WARNING("Locale directory not found: ", localePath);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Locale-specific archives
|
||||
std::vector<std::pair<std::string, int>> localeArchives = {
|
||||
{"locale-" + locale + ".MPQ", 250},
|
||||
{"patch-" + locale + ".MPQ", 450},
|
||||
{"patch-" + locale + "-2.MPQ", 460},
|
||||
{"patch-" + locale + "-3.MPQ", 470},
|
||||
};
|
||||
|
||||
int loadedLocale = 0;
|
||||
for (const auto& [archive, priority] : localeArchives) {
|
||||
std::string fullPath = localePath + "/" + archive;
|
||||
if (std::filesystem::exists(fullPath)) {
|
||||
if (loadArchive(fullPath, priority)) {
|
||||
loadedLocale++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LOG_INFO("Loaded ", loadedLocale, " locale archives for ", locale);
|
||||
return loadedLocale > 0;
|
||||
}
|
||||
|
||||
} // namespace pipeline
|
||||
} // namespace wowee
|
||||
345
src/pipeline/terrain_mesh.cpp
Normal file
345
src/pipeline/terrain_mesh.cpp
Normal file
|
|
@ -0,0 +1,345 @@
|
|||
#include "pipeline/terrain_mesh.hpp"
#include "core/logger.hpp"
#include <algorithm>
#include <cmath>
|
||||
|
||||
namespace wowee {
|
||||
namespace pipeline {
|
||||
|
||||
// Builds a renderable mesh for every chunk in the 16x16 tile grid
// that carries heightmap data; chunks without heights are left empty.
// Returns an empty TerrainMesh when the terrain is not loaded.
TerrainMesh TerrainMeshGenerator::generate(const ADTTerrain& terrain) {
    TerrainMesh mesh;

    if (!terrain.isLoaded()) {
        LOG_WARNING("Attempting to generate mesh from unloaded terrain");
        return mesh;
    }

    LOG_INFO("Generating terrain mesh for ADT...");

    // Carry the tile's texture list over to the mesh.
    mesh.textures = terrain.textures;

    int validCount = 0;
    bool loggedFirstChunk = false;
    for (int cy = 0; cy < 16; cy++) {
        for (int cx = 0; cx < 16; cx++) {
            const MapChunk& chunk = terrain.getChunk(cx, cy);
            if (!chunk.hasHeightMap()) {
                continue;
            }

            mesh.getChunk(cx, cy) = generateChunkMesh(chunk, cx, cy, terrain.coord.x, terrain.coord.y);
            validCount++;

            // Debug: log first chunk world position
            if (!loggedFirstChunk) {
                loggedFirstChunk = true;
                LOG_DEBUG("First terrain chunk world pos: (", chunk.position[0], ", ",
                          chunk.position[1], ", ", chunk.position[2], ")");
            }
        }
    }

    mesh.validChunkCount = validCount;
    LOG_INFO("Generated ", validCount, " terrain chunk meshes");

    return mesh;
}
|
||||
|
||||
// Builds the renderable mesh for one map chunk: vertices from the
// heightmap, hole-aware triangle indices, and per-layer alpha maps
// decoded from MCAL (RLE-compressed, 8-bit "big alpha", or 4-bit
// packed formats).
//
// Fix: the per-layer alpha size was computed as
// `nextLayer.offsetMCAL - offset` without validation; on malformed
// data where the next layer's offset precedes this one, the size_t
// subtraction underflowed to a huge value and the 4096/2048-byte copy
// paths could read past the end of chunk.alphaMap. The size is now
// clamped to the bytes actually remaining in the MCAL buffer; output
// is unchanged for well-formed files.
ChunkMesh TerrainMeshGenerator::generateChunkMesh(const MapChunk& chunk, int chunkX, int chunkY, int tileX, int tileY) {
    ChunkMesh mesh;

    mesh.chunkX = chunkX;
    mesh.chunkY = chunkY;

    // World position from chunk data
    mesh.worldX = chunk.position[0];
    mesh.worldY = chunk.position[1];
    mesh.worldZ = chunk.position[2];

    // Generate vertices from heightmap (pass chunk grid indices and tile coords)
    mesh.vertices = generateVertices(chunk, chunkX, chunkY, tileX, tileY);

    // Generate triangle indices (checks for holes)
    mesh.indices = generateIndices(chunk);

    // Debug: verify mesh integrity (one-time)
    static bool debugLogged = false;
    if (!debugLogged && chunkX == 0 && chunkY == 0) {
        debugLogged = true;
        LOG_INFO("Terrain mesh debug: ", mesh.vertices.size(), " vertices, ",
                 mesh.indices.size(), " indices (", mesh.indices.size() / 3, " triangles)");

        // Verify all indices are in bounds
        int maxIndex = 0;
        int minIndex = 9999;
        for (auto idx : mesh.indices) {
            if (static_cast<int>(idx) > maxIndex) maxIndex = idx;
            if (static_cast<int>(idx) < minIndex) minIndex = idx;
        }
        LOG_INFO("Index range: [", minIndex, ", ", maxIndex, "] (expected [0, 144])");

        if (maxIndex >= static_cast<int>(mesh.vertices.size())) {
            LOG_ERROR("INDEX OUT OF BOUNDS! Max index ", maxIndex, " >= vertex count ", mesh.vertices.size());
        }

        // Check for invalid vertex positions
        int invalidCount = 0;
        for (size_t i = 0; i < mesh.vertices.size(); i++) {
            const auto& v = mesh.vertices[i];
            if (!std::isfinite(v.position[0]) || !std::isfinite(v.position[1]) || !std::isfinite(v.position[2])) {
                invalidCount++;
            }
        }
        if (invalidCount > 0) {
            LOG_ERROR("Found ", invalidCount, " vertices with invalid positions!");
        }
    }

    // Copy texture layers
    for (size_t layerIdx = 0; layerIdx < chunk.layers.size(); layerIdx++) {
        const auto& layer = chunk.layers[layerIdx];
        ChunkMesh::LayerInfo layerInfo;
        layerInfo.textureId = layer.textureId;
        layerInfo.flags = layer.flags;

        // Extract alpha data for this layer if it has alpha
        if (layer.useAlpha() && layer.offsetMCAL < chunk.alphaMap.size()) {
            size_t offset = layer.offsetMCAL;
            size_t remaining = chunk.alphaMap.size() - offset;

            // Compute the per-layer size from the next alpha layer's offset,
            // clamped to the bytes remaining in the MCAL buffer so malformed
            // offsets can neither underflow nor allow out-of-bounds reads.
            size_t layerSize = remaining;
            for (size_t j = layerIdx + 1; j < chunk.layers.size(); j++) {
                if (chunk.layers[j].useAlpha()) {
                    size_t nextOffset = chunk.layers[j].offsetMCAL;
                    if (nextOffset > offset && nextOffset - offset < remaining) {
                        layerSize = nextOffset - offset;
                    }
                    break;
                }
            }

            if (layer.compressedAlpha()) {
                // Decompress RLE-compressed alpha map to 64x64 = 4096 bytes.
                // Command byte: high bit selects fill mode, low 7 bits are
                // the run length minus one.
                layerInfo.alphaData.resize(4096, 0);
                size_t readPos = offset;
                size_t writePos = 0;

                while (writePos < 4096 && readPos < chunk.alphaMap.size()) {
                    uint8_t cmd = chunk.alphaMap[readPos++];
                    bool fill = (cmd & 0x80) != 0;
                    int count = (cmd & 0x7F) + 1;

                    if (fill) {
                        // Fill run: repeat the next byte `count` times.
                        if (readPos < chunk.alphaMap.size()) {
                            uint8_t val = chunk.alphaMap[readPos++];
                            for (int i = 0; i < count && writePos < 4096; i++) {
                                layerInfo.alphaData[writePos++] = val;
                            }
                        }
                    } else {
                        // Copy run: copy `count` literal bytes.
                        for (int i = 0; i < count && writePos < 4096 && readPos < chunk.alphaMap.size(); i++) {
                            layerInfo.alphaData[writePos++] = chunk.alphaMap[readPos++];
                        }
                    }
                }
            } else if (layerSize >= 4096) {
                // Big alpha: 64x64 at 8-bit = 4096 bytes. The clamped
                // layerSize guarantees offset + 4096 stays within alphaMap.
                layerInfo.alphaData.resize(4096);
                std::copy(chunk.alphaMap.begin() + offset,
                          chunk.alphaMap.begin() + offset + 4096,
                          layerInfo.alphaData.begin());
            } else if (layerSize >= 2048) {
                // Non-big alpha: 2048 bytes = 4-bit per texel, 64x64
                // Each byte: low nibble = first texel, high nibble = second texel
                // Scale 0-15 to 0-255 (multiply by 17)
                layerInfo.alphaData.resize(4096);
                for (size_t i = 0; i < 2048; i++) {
                    uint8_t byte = chunk.alphaMap[offset + i];
                    layerInfo.alphaData[i * 2] = (byte & 0x0F) * 17;
                    layerInfo.alphaData[i * 2 + 1] = (byte >> 4) * 17;
                }
            }
        }

        mesh.layers.push_back(layerInfo);
    }

    return mesh;
}
|
||||
|
||||
std::vector<TerrainVertex> TerrainMeshGenerator::generateVertices(const MapChunk& chunk, int chunkX, int chunkY, int tileX, int tileY) {
|
||||
std::vector<TerrainVertex> vertices;
|
||||
vertices.reserve(145); // 145 vertices total
|
||||
|
||||
const HeightMap& heightMap = chunk.heightMap;
|
||||
|
||||
// WoW terrain uses 145 heights stored in a 9x17 row-major grid layout
|
||||
const float unitSize = CHUNK_SIZE / 8.0f; // 66.67 units per vertex step
|
||||
|
||||
// chunk.position contains world coordinates for this chunk's origin
|
||||
// Both X and Y are at world scale (no scaling needed)
|
||||
float chunkBaseX = chunk.position[0];
|
||||
float chunkBaseY = chunk.position[1];
|
||||
|
||||
for (int index = 0; index < 145; index++) {
|
||||
int y = index / 17; // Row (0-8)
|
||||
int x = index % 17; // Column (0-16)
|
||||
|
||||
// Columns 9-16 are offset by 0.5 units (wowee exact logic)
|
||||
float offsetX = static_cast<float>(x);
|
||||
float offsetY = static_cast<float>(y);
|
||||
|
||||
if (x > 8) {
|
||||
offsetY += 0.5f;
|
||||
offsetX -= 8.5f;
|
||||
}
|
||||
|
||||
TerrainVertex vertex;
|
||||
|
||||
// Position - match wowee.js coordinate layout (swap X/Y and negate)
|
||||
// wowee.js: X = -(y * unitSize), Y = -(x * unitSize)
|
||||
vertex.position[0] = chunkBaseX - (offsetY * unitSize);
|
||||
vertex.position[1] = chunkBaseY - (offsetX * unitSize);
|
||||
vertex.position[2] = chunk.position[2] + heightMap.heights[index];
|
||||
|
||||
// Normal
|
||||
if (index * 3 + 2 < static_cast<int>(chunk.normals.size())) {
|
||||
decompressNormal(&chunk.normals[index * 3], vertex.normal);
|
||||
} else {
|
||||
// Default up normal
|
||||
vertex.normal[0] = 0.0f;
|
||||
vertex.normal[1] = 0.0f;
|
||||
vertex.normal[2] = 1.0f;
|
||||
}
|
||||
|
||||
// Texture coordinates (0-1 per chunk, tiles with GL_REPEAT)
|
||||
vertex.texCoord[0] = offsetX / 8.0f;
|
||||
vertex.texCoord[1] = offsetY / 8.0f;
|
||||
|
||||
// Layer UV for alpha map sampling (0-1 range per chunk)
|
||||
vertex.layerUV[0] = offsetX / 8.0f;
|
||||
vertex.layerUV[1] = offsetY / 8.0f;
|
||||
|
||||
vertices.push_back(vertex);
|
||||
}
|
||||
|
||||
return vertices;
|
||||
}
|
||||
|
||||
std::vector<TerrainIndex> TerrainMeshGenerator::generateIndices(const MapChunk& chunk) {
|
||||
std::vector<TerrainIndex> indices;
|
||||
indices.reserve(768); // 8x8 quads * 4 triangles * 3 indices = 768
|
||||
|
||||
// Generate indices based on 9x17 grid layout (matching wowee.js)
|
||||
// Each quad uses a center vertex with 4 surrounding vertices
|
||||
// Index offsets from center: -9, -8, +9, +8
|
||||
|
||||
int holesSkipped = 0;
|
||||
for (int y = 0; y < 8; y++) {
|
||||
for (int x = 0; x < 8; x++) {
|
||||
// Skip quads that are marked as holes (cave entrances, etc.)
|
||||
if (chunk.isHole(y, x)) {
|
||||
holesSkipped++;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Center vertex index in the 9x17 grid
|
||||
int center = 9 + y * 17 + x;
|
||||
|
||||
// Four triangles per quad
|
||||
// Using CCW winding when viewed from +Z (top-down)
|
||||
int tl = center - 9; // top-left outer
|
||||
int tr = center - 8; // top-right outer
|
||||
int bl = center + 8; // bottom-left outer
|
||||
int br = center + 9; // bottom-right outer
|
||||
|
||||
// Triangle 1: top (center, tl, tr)
|
||||
indices.push_back(center);
|
||||
indices.push_back(tl);
|
||||
indices.push_back(tr);
|
||||
|
||||
// Triangle 2: right (center, tr, br)
|
||||
indices.push_back(center);
|
||||
indices.push_back(tr);
|
||||
indices.push_back(br);
|
||||
|
||||
// Triangle 3: bottom (center, br, bl)
|
||||
indices.push_back(center);
|
||||
indices.push_back(br);
|
||||
indices.push_back(bl);
|
||||
|
||||
// Triangle 4: left (center, bl, tl)
|
||||
indices.push_back(center);
|
||||
indices.push_back(bl);
|
||||
indices.push_back(tl);
|
||||
}
|
||||
}
|
||||
|
||||
// Debug: log if any holes were skipped (one-time per session)
|
||||
static bool holesLogged = false;
|
||||
if (!holesLogged && holesSkipped > 0) {
|
||||
holesLogged = true;
|
||||
LOG_INFO("Terrain holes: skipped ", holesSkipped, " quads due to hole mask (holes=0x",
|
||||
std::hex, chunk.holes, std::dec, ")");
|
||||
}
|
||||
|
||||
return indices;
|
||||
}
|
||||
|
||||
void TerrainMeshGenerator::calculateTexCoords(TerrainVertex& vertex, int x, int y) {
    // Map a virtual grid position (0-16 on each axis) to a 0-1 UV across the
    // chunk; the alpha-map layer UVs currently mirror the base UVs exactly.
    const float u = x / 16.0f;
    const float v = y / 16.0f;

    vertex.texCoord[0] = u;
    vertex.texCoord[1] = v;

    vertex.layerUV[0] = u;
    vertex.layerUV[1] = v;
}
|
||||
|
||||
void TerrainMeshGenerator::decompressNormal(const int8_t* compressedNormal, float* normal) {
    // Expand a packed normal (three signed bytes, -127..127 per axis) into a
    // unit-length float vector. Degenerate inputs fall back to straight up.
    float v[3];
    for (int i = 0; i < 3; i++) {
        v[i] = compressedNormal[i] / 127.0f;
    }

    const float length = std::sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
    if (length > 0.0001f) {
        for (int i = 0; i < 3; i++) {
            normal[i] = v[i] / length;
        }
    } else {
        // Near-zero vector: substitute the +Z up normal.
        normal[0] = 0.0f;
        normal[1] = 0.0f;
        normal[2] = 1.0f;
    }
}
|
||||
|
||||
int TerrainMeshGenerator::getVertexIndex(int x, int y) {
    // Translate a virtual grid position (0-16 per axis) into the packed
    // MCVT vertex index: even/even coordinates address the 9x9 outer grid
    // (indices 0-80), odd/odd the 8x8 inner grid (indices 81-144).
    // Mixed-parity positions have no stored vertex and yield -1.
    // Note: the `% 2 == 1` test is deliberately kept so negative inputs
    // (whose remainder is -1 in C++) also fall through to -1.
    if ((y % 2 == 0) && (x % 2 == 0)) {
        // Outer 9x9 grid.
        return (y / 2) * 9 + (x / 2);
    }
    if ((y % 2 == 1) && (x % 2 == 1)) {
        // Inner 8x8 grid, offset past the 81 outer vertices.
        return 81 + ((y - 1) / 2) * 8 + ((x - 1) / 2);
    }
    return -1; // Invalid position
}
|
||||
|
||||
} // namespace pipeline
|
||||
} // namespace wowee
|
||||
556
src/pipeline/wmo_loader.cpp
Normal file
556
src/pipeline/wmo_loader.cpp
Normal file
|
|
@ -0,0 +1,556 @@
|
|||
#include "pipeline/wmo_loader.hpp"
|
||||
#include "core/logger.hpp"
|
||||
#include <cstring>
|
||||
#include <glm/gtc/quaternion.hpp>
|
||||
|
||||
namespace wowee {
|
||||
namespace pipeline {
|
||||
|
||||
namespace {

// WMO chunk identifiers (root file).
constexpr uint32_t MVER = 0x4D564552; // Version
constexpr uint32_t MOHD = 0x4D4F4844; // Header
constexpr uint32_t MOTX = 0x4D4F5458; // Textures
constexpr uint32_t MOMT = 0x4D4F4D54; // Materials
constexpr uint32_t MOGN = 0x4D4F474E; // Group names
constexpr uint32_t MOGI = 0x4D4F4749; // Group info
constexpr uint32_t MOLT = 0x4D4F4C54; // Lights
constexpr uint32_t MODN = 0x4D4F444E; // Doodad names
constexpr uint32_t MODD = 0x4D4F4444; // Doodad definitions
constexpr uint32_t MODS = 0x4D4F4453; // Doodad sets
constexpr uint32_t MOPV = 0x4D4F5056; // Portal vertices
constexpr uint32_t MOPT = 0x4D4F5054; // Portal info
constexpr uint32_t MOPR = 0x4D4F5052; // Portal references
constexpr uint32_t MFOG = 0x4D464F47; // Fog

// WMO group chunk identifiers.
constexpr uint32_t MOGP = 0x4D4F4750; // Group header
constexpr uint32_t MOVT = 0x4D4F5654; // Geometry vertices
constexpr uint32_t MOVV = 0x4D4F5656; // Visible block vertices (NOT the geometry vertices; those are MOVT)
constexpr uint32_t MOVI = 0x4D4F5649; // Indices
constexpr uint32_t MOBA = 0x4D4F4241; // Batches
constexpr uint32_t MOCV = 0x4D4F4356; // Vertex colors
constexpr uint32_t MONR = 0x4D4F4E52; // Normals
constexpr uint32_t MOTV = 0x4D4F5456; // Texture coords

// Read a single little-endian POD value at `offset` and advance the cursor.
// On out-of-range access it returns a value-initialized T and leaves
// `offset` untouched, so callers' bounds guards can detect the stall.
template<typename T>
T read(const std::vector<uint8_t>& data, uint32_t& offset) {
    if (offset + sizeof(T) > data.size()) {
        return T{};
    }
    T value;
    std::memcpy(&value, &data[offset], sizeof(T));
    offset += sizeof(T);
    return value;
}

// Copy `count` PODs starting at `offset` (cursor NOT advanced).
// Returns an empty vector when the requested span does not fit.
// The bounds test compares against the remaining byte budget instead of
// computing offset + count*sizeof(T), so it cannot wrap on any platform.
template<typename T>
std::vector<T> readArray(const std::vector<uint8_t>& data, uint32_t offset, uint32_t count) {
    std::vector<T> result;
    if (offset > data.size() ||
        static_cast<uint64_t>(count) * sizeof(T) > data.size() - offset) {
        return result;
    }
    if (count == 0) {
        return result; // avoid memcpy on an empty vector's null data()
    }
    result.resize(count);
    std::memcpy(result.data(), &data[offset], count * sizeof(T));
    return result;
}

// Read a NUL-terminated string starting at `offset` (cursor NOT advanced).
// An unterminated string is truncated at end-of-buffer.
std::string readString(const std::vector<uint8_t>& data, uint32_t offset) {
    std::string result;
    while (offset < data.size() && data[offset] != 0) {
        result += static_cast<char>(data[offset++]);
    }
    return result;
}

} // anonymous namespace
|
||||
|
||||
/// Parses the root WMO file into a WMOModel.
///
/// The file is a sequence of chunks, each prefixed by an 8-byte header
/// (uint32 id + uint32 size). Unknown chunks are skipped; every handler
/// resumes at the declared chunk boundary, so a short read inside a chunk
/// cannot desynchronize the outer loop. Group geometry is NOT parsed here —
/// it lives in separate per-group files handled by loadGroup().
///
/// @param wmoData  Raw bytes of the root .wmo file.
/// @return Populated model; default-constructed (empty) model on malformed
///         or too-small input.
WMOModel WMOLoader::load(const std::vector<uint8_t>& wmoData) {
    WMOModel model;

    if (wmoData.size() < 8) {
        core::Logger::getInstance().error("WMO data too small");
        return model;
    }

    core::Logger::getInstance().info("Loading WMO model...");

    uint32_t offset = 0;

    // Parse chunks
    while (offset + 8 <= wmoData.size()) {
        uint32_t chunkId = read<uint32_t>(wmoData, offset);
        uint32_t chunkSize = read<uint32_t>(wmoData, offset);

        // Reject a chunk whose payload would run past end-of-file.
        if (offset + chunkSize > wmoData.size()) {
            core::Logger::getInstance().warning("Chunk extends beyond file");
            break;
        }

        uint32_t chunkStart = offset;
        uint32_t chunkEnd = offset + chunkSize;

        switch (chunkId) {
            case MVER: {
                model.version = read<uint32_t>(wmoData, offset);
                core::Logger::getInstance().info("WMO version: ", model.version);
                break;
            }

            case MOHD: {
                // Header
                // NOTE(review): per wowdev's SMOHeader the chunk begins with
                // nTextures BEFORE nGroups; if that layout applies here, every
                // count below is shifted one field early (nGroups would hold
                // nTextures, etc.) and the bounding box starts 4 bytes late.
                // Verify against real data before relying on these fields.
                model.nGroups = read<uint32_t>(wmoData, offset);
                model.nPortals = read<uint32_t>(wmoData, offset);
                model.nLights = read<uint32_t>(wmoData, offset);
                model.nDoodadNames = read<uint32_t>(wmoData, offset);
                model.nDoodadDefs = read<uint32_t>(wmoData, offset);
                model.nDoodadSets = read<uint32_t>(wmoData, offset);

                [[maybe_unused]] uint32_t ambColor = read<uint32_t>(wmoData, offset); // Ambient color
                [[maybe_unused]] uint32_t wmoID = read<uint32_t>(wmoData, offset);

                model.boundingBoxMin.x = read<float>(wmoData, offset);
                model.boundingBoxMin.y = read<float>(wmoData, offset);
                model.boundingBoxMin.z = read<float>(wmoData, offset);

                model.boundingBoxMax.x = read<float>(wmoData, offset);
                model.boundingBoxMax.y = read<float>(wmoData, offset);
                model.boundingBoxMax.z = read<float>(wmoData, offset);

                core::Logger::getInstance().info("WMO groups: ", model.nGroups);
                break;
            }

            case MOTX: {
                // Textures - raw block of null-terminated strings
                // Material texture1/texture2/texture3 are byte offsets into this chunk.
                // We must map every offset to its texture index.
                uint32_t texOffset = chunkStart;
                uint32_t texIndex = 0;
                core::Logger::getInstance().info("MOTX chunk: ", chunkSize, " bytes");
                while (texOffset < chunkEnd) {
                    // Offset relative to the chunk start — this is the key
                    // material records use to reference a texture.
                    uint32_t relativeOffset = texOffset - chunkStart;

                    std::string texName = readString(wmoData, texOffset);
                    if (texName.empty()) {
                        // Skip null bytes (empty entries or padding)
                        texOffset++;
                        continue;
                    }

                    // Store mapping from byte offset to texture index
                    model.textureOffsetToIndex[relativeOffset] = texIndex;
                    model.textures.push_back(texName);
                    core::Logger::getInstance().info("  MOTX texture[", texIndex, "] at offset ", relativeOffset, ": ", texName);
                    texOffset += texName.length() + 1; // advance past the NUL terminator
                    texIndex++;
                }
                core::Logger::getInstance().info("WMO textures: ", model.textures.size());
                break;
            }

            case MOMT: {
                // Materials - dump raw fields to find correct layout
                uint32_t nMaterials = chunkSize / 64; // Each material is 64 bytes
                for (uint32_t i = 0; i < nMaterials; i++) {
                    // Read all 16 uint32 fields (64 bytes)
                    uint32_t fields[16];
                    for (int j = 0; j < 16; j++) {
                        fields[j] = read<uint32_t>(wmoData, offset);
                    }

                    // SMOMaterial layout (wowdev.wiki):
                    // 0: flags, 1: shader, 2: blendMode
                    // 3: texture_1 (MOTX offset)
                    // 4: sidnColor (emissive), 5: frameSidnColor
                    // 6: texture_2 (MOTX offset)
                    // 7: diffColor, 8: ground_type
                    // 9: texture_3 (MOTX offset)
                    // 10: color_2, 11: flags2
                    // 12-15: runtime
                    WMOMaterial mat;
                    mat.flags = fields[0];
                    mat.shader = fields[1];
                    mat.blendMode = fields[2];
                    mat.texture1 = fields[3];
                    mat.color1 = fields[4];
                    mat.texture2 = fields[6]; // Skip frameSidnColor at [5]
                    mat.color2 = fields[7];
                    mat.texture3 = fields[9]; // Skip ground_type at [8]
                    mat.color3 = fields[10];

                    model.materials.push_back(mat);
                }
                core::Logger::getInstance().info("WMO materials: ", model.materials.size());
                break;
            }

            case MOGN: {
                // Group names — packed null-terminated strings.
                // NOTE(review): the empty-string break also triggers on
                // inter-string NUL padding, which would stop the scan early;
                // MOGI nameOffset consumers should tolerate missing entries.
                uint32_t nameOffset = chunkStart;
                while (nameOffset < chunkEnd) {
                    std::string name = readString(wmoData, nameOffset);
                    if (name.empty()) break;
                    model.groupNames.push_back(name);
                    nameOffset += name.length() + 1;
                }
                core::Logger::getInstance().info("WMO group names: ", model.groupNames.size());
                break;
            }

            case MOGI: {
                // Group info — per-group flags, AABB and MOGN name offset.
                uint32_t nGroupInfo = chunkSize / 32; // Each group info is 32 bytes
                for (uint32_t i = 0; i < nGroupInfo; i++) {
                    WMOGroupInfo info;
                    info.flags = read<uint32_t>(wmoData, offset);
                    info.boundingBoxMin.x = read<float>(wmoData, offset);
                    info.boundingBoxMin.y = read<float>(wmoData, offset);
                    info.boundingBoxMin.z = read<float>(wmoData, offset);
                    info.boundingBoxMax.x = read<float>(wmoData, offset);
                    info.boundingBoxMax.y = read<float>(wmoData, offset);
                    info.boundingBoxMax.z = read<float>(wmoData, offset);
                    info.nameOffset = read<int32_t>(wmoData, offset); // -1 means unnamed

                    model.groupInfo.push_back(info);
                }
                core::Logger::getInstance().info("WMO group info: ", model.groupInfo.size());
                break;
            }

            case MOLT: {
                // Lights
                // NOTE(review): the count assumes a 48-byte stride, but the
                // reads below consume 64 bytes per record (color as 4 floats =
                // 16 bytes). wowdev's SMOLight is 48 bytes with a 4-byte BGRA
                // color, so every light after the first would be misaligned —
                // confirm against real files. The `offset < chunkEnd` guard at
                // least keeps the loop inside the chunk.
                uint32_t nLights = chunkSize / 48; // Approximate size
                for (uint32_t i = 0; i < nLights && offset < chunkEnd; i++) {
                    WMOLight light;
                    light.type = read<uint32_t>(wmoData, offset);
                    light.useAttenuation = read<uint8_t>(wmoData, offset);
                    light.pad[0] = read<uint8_t>(wmoData, offset);
                    light.pad[1] = read<uint8_t>(wmoData, offset);
                    light.pad[2] = read<uint8_t>(wmoData, offset);

                    light.color.r = read<float>(wmoData, offset);
                    light.color.g = read<float>(wmoData, offset);
                    light.color.b = read<float>(wmoData, offset);
                    light.color.a = read<float>(wmoData, offset);

                    light.position.x = read<float>(wmoData, offset);
                    light.position.y = read<float>(wmoData, offset);
                    light.position.z = read<float>(wmoData, offset);

                    light.intensity = read<float>(wmoData, offset);
                    light.attenuationStart = read<float>(wmoData, offset);
                    light.attenuationEnd = read<float>(wmoData, offset);

                    for (int j = 0; j < 4; j++) {
                        light.unknown[j] = read<float>(wmoData, offset);
                    }

                    model.lights.push_back(light);
                }
                core::Logger::getInstance().info("WMO lights: ", model.lights.size());
                break;
            }

            case MODN: {
                // Doodad names — stored by byte offset into the MODN chunk
                // (MODD nameIndex is a byte offset, not a vector index)
                uint32_t nameOffset = 0; // Offset relative to chunk start
                while (chunkStart + nameOffset < chunkEnd) {
                    std::string name = readString(wmoData, chunkStart + nameOffset);
                    if (!name.empty()) {
                        model.doodadNames[nameOffset] = name;
                    }
                    // Empty names advance by 1 byte, stepping over padding.
                    nameOffset += name.length() + 1;
                }
                core::Logger::getInstance().debug("Loaded ", model.doodadNames.size(), " doodad names");
                break;
            }

            case MODD: {
                // Doodad definitions — placement records for M2 props.
                uint32_t nDoodads = chunkSize / 40; // Each doodad is 40 bytes
                for (uint32_t i = 0; i < nDoodads; i++) {
                    WMODoodad doodad;

                    // Name index (3 bytes) + flags (1 byte)
                    // The top flag byte is currently discarded.
                    uint32_t nameAndFlags = read<uint32_t>(wmoData, offset);
                    doodad.nameIndex = nameAndFlags & 0x00FFFFFF;

                    doodad.position.x = read<float>(wmoData, offset);
                    doodad.position.y = read<float>(wmoData, offset);
                    doodad.position.z = read<float>(wmoData, offset);

                    // C4Quaternion in file: x, y, z, w
                    doodad.rotation.x = read<float>(wmoData, offset);
                    doodad.rotation.y = read<float>(wmoData, offset);
                    doodad.rotation.z = read<float>(wmoData, offset);
                    doodad.rotation.w = read<float>(wmoData, offset);

                    doodad.scale = read<float>(wmoData, offset);

                    // Packed BGRA byte color -> normalized floats.
                    uint32_t color = read<uint32_t>(wmoData, offset);
                    doodad.color.b = ((color >> 0) & 0xFF) / 255.0f;
                    doodad.color.g = ((color >> 8) & 0xFF) / 255.0f;
                    doodad.color.r = ((color >> 16) & 0xFF) / 255.0f;
                    doodad.color.a = ((color >> 24) & 0xFF) / 255.0f;

                    model.doodads.push_back(doodad);
                }
                core::Logger::getInstance().info("WMO doodads: ", model.doodads.size());
                break;
            }

            case MODS: {
                // Doodad sets — named ranges into the MODD array.
                uint32_t nSets = chunkSize / 32; // Each set is 32 bytes
                for (uint32_t i = 0; i < nSets; i++) {
                    WMODoodadSet set;
                    std::memcpy(set.name, &wmoData[offset], 20); // fixed 20-char name, not NUL-guaranteed
                    offset += 20;
                    set.startIndex = read<uint32_t>(wmoData, offset);
                    set.count = read<uint32_t>(wmoData, offset);
                    set.padding = read<uint32_t>(wmoData, offset);

                    model.doodadSets.push_back(set);
                }
                core::Logger::getInstance().info("WMO doodad sets: ", model.doodadSets.size());
                break;
            }

            case MOPV: {
                // Portal vertices
                uint32_t nVerts = chunkSize / 12; // Each vertex is 3 floats
                for (uint32_t i = 0; i < nVerts; i++) {
                    glm::vec3 vert;
                    vert.x = read<float>(wmoData, offset);
                    vert.y = read<float>(wmoData, offset);
                    vert.z = read<float>(wmoData, offset);
                    model.portalVertices.push_back(vert);
                }
                break;
            }

            case MOPT: {
                // Portal info
                // NOTE(review): wowdev's SMOPortal is start(u16) + count(u16)
                // followed by a C4Plane (16 bytes); the planeIndex/padding
                // reads plus the 12-byte skip below cover the same 20 bytes
                // but reinterpret the plane data — confirm the intended
                // layout before using planeIndex.
                uint32_t nPortals = chunkSize / 20; // Each portal reference is 20 bytes
                for (uint32_t i = 0; i < nPortals; i++) {
                    WMOPortal portal;
                    portal.startVertex = read<uint16_t>(wmoData, offset);
                    portal.vertexCount = read<uint16_t>(wmoData, offset);
                    portal.planeIndex = read<uint16_t>(wmoData, offset);
                    portal.padding = read<uint16_t>(wmoData, offset);

                    // Skip additional data (12 bytes)
                    offset += 12;

                    model.portals.push_back(portal);
                }
                core::Logger::getInstance().info("WMO portals: ", model.portals.size());
                break;
            }

            default:
                // Unknown chunk, skip it
                break;
        }

        // Resume at the declared boundary regardless of how much the
        // handler consumed — keeps the stream aligned after partial reads.
        offset = chunkEnd;
    }

    // Initialize groups array
    // (sized from the MOHD count; loadGroup() fills the entries later)
    model.groups.resize(model.nGroups);

    core::Logger::getInstance().info("WMO model loaded successfully");
    return model;
}
|
||||
|
||||
/// Parses one per-group WMO file (geometry) into model.groups[groupIndex].
///
/// A group file is MVER followed by a single MOGP chunk; the MOGP payload is
/// a 68-byte header followed by sub-chunks (MOVT vertices, MOVI indices,
/// MONR normals, MOTV UVs, MOCV vertex colors, MOBA render batches).
///
/// @param groupData   Raw bytes of the group file.
/// @param model       Root model whose groups array was sized by load().
/// @param groupIndex  Index into model.groups; must be < groups.size().
/// @return true when the group ends up with both vertices and indices.
bool WMOLoader::loadGroup(const std::vector<uint8_t>& groupData,
                          WMOModel& model,
                          uint32_t groupIndex) {
    if (groupIndex >= model.groups.size()) {
        core::Logger::getInstance().error("Invalid group index: ", groupIndex);
        return false;
    }

    if (groupData.size() < 20) {
        core::Logger::getInstance().error("WMO group file too small");
        return false;
    }

    auto& group = model.groups[groupIndex];
    group.groupId = groupIndex;

    uint32_t offset = 0;

    // Parse chunks in group file
    // NOTE(review): `<` (not `<=`) skips a zero-payload chunk whose 8-byte
    // header ends exactly at EOF; the root loader uses `<=`.
    while (offset + 8 < groupData.size()) {
        uint32_t chunkId = read<uint32_t>(groupData, offset);
        uint32_t chunkSize = read<uint32_t>(groupData, offset);
        uint32_t chunkEnd = offset + chunkSize;

        if (chunkEnd > groupData.size()) {
            break;
        }

        if (chunkId == MVER) {
            // Version - skip
        }
        else if (chunkId == MOGP) {
            // Group header - parse sub-chunks
            // MOGP header is 68 bytes, followed by sub-chunks
            if (chunkSize < 68) {
                offset = chunkEnd;
                continue;
            }

            // Read MOGP header
            uint32_t mogpOffset = offset;
            group.flags = read<uint32_t>(groupData, mogpOffset);
            group.boundingBoxMin.x = read<float>(groupData, mogpOffset);
            group.boundingBoxMin.y = read<float>(groupData, mogpOffset);
            group.boundingBoxMin.z = read<float>(groupData, mogpOffset);
            group.boundingBoxMax.x = read<float>(groupData, mogpOffset);
            group.boundingBoxMax.y = read<float>(groupData, mogpOffset);
            group.boundingBoxMax.z = read<float>(groupData, mogpOffset);
            mogpOffset += 4; // nameOffset
            group.portalStart = read<uint16_t>(groupData, mogpOffset);
            group.portalCount = read<uint16_t>(groupData, mogpOffset);
            mogpOffset += 8; // transBatchCount, intBatchCount, extBatchCount, padding
            // NOTE(review): wowdev's MOGP stores fogIds as 4 bytes (uint8[4])
            // followed by groupLiquid; reading four uint32s here consumes 16
            // bytes, so fogIndices/liquidType would hold shifted data. The
            // hard reset to offset+68 below keeps sub-chunk parsing aligned
            // either way — verify these two fields before using them.
            group.fogIndices[0] = read<uint32_t>(groupData, mogpOffset);
            group.fogIndices[1] = read<uint32_t>(groupData, mogpOffset);
            group.fogIndices[2] = read<uint32_t>(groupData, mogpOffset);
            group.fogIndices[3] = read<uint32_t>(groupData, mogpOffset);
            group.liquidType = read<uint32_t>(groupData, mogpOffset);
            // Skip to end of 68-byte header
            mogpOffset = offset + 68;

            // Parse sub-chunks within MOGP
            while (mogpOffset + 8 < chunkEnd) {
                uint32_t subChunkId = read<uint32_t>(groupData, mogpOffset);
                uint32_t subChunkSize = read<uint32_t>(groupData, mogpOffset);
                uint32_t subChunkEnd = mogpOffset + subChunkSize;

                if (subChunkEnd > chunkEnd) {
                    break;
                }

                // Debug: log chunk magic as string
                // (id bytes are little-endian, so this prints them reversed
                // relative to the on-disk order)
                char magic[5] = {0};
                magic[0] = (subChunkId >> 0) & 0xFF;
                magic[1] = (subChunkId >> 8) & 0xFF;
                magic[2] = (subChunkId >> 16) & 0xFF;
                magic[3] = (subChunkId >> 24) & 0xFF;
                static int logCount = 0;
                if (logCount < 30) {
                    core::Logger::getInstance().debug("  WMO sub-chunk: ", magic, " (0x", std::hex, subChunkId, std::dec, ") size=", subChunkSize);
                    logCount++;
                }

                if (subChunkId == 0x4D4F5654) { // MOVT - Vertices
                    uint32_t vertexCount = subChunkSize / 12; // 3 floats per vertex
                    for (uint32_t i = 0; i < vertexCount; i++) {
                        WMOVertex vertex;
                        vertex.position.x = read<float>(groupData, mogpOffset);
                        vertex.position.y = read<float>(groupData, mogpOffset);
                        vertex.position.z = read<float>(groupData, mogpOffset);
                        // Placeholder attributes; MONR/MOTV/MOCV overwrite
                        // them if those sub-chunks are present.
                        vertex.normal = glm::vec3(0, 0, 1);
                        vertex.texCoord = glm::vec2(0, 0);
                        vertex.color = glm::vec4(1, 1, 1, 1);
                        group.vertices.push_back(vertex);
                    }
                }
                else if (subChunkId == 0x4D4F5649) { // MOVI - Indices
                    uint32_t indexCount = subChunkSize / 2; // uint16_t per index
                    for (uint32_t i = 0; i < indexCount; i++) {
                        group.indices.push_back(read<uint16_t>(groupData, mogpOffset));
                    }
                }
                else if (subChunkId == 0x4D4F4E52) { // MONR - Normals
                    // One normal per vertex; clamped to the vertex count in
                    // case the chunks disagree.
                    uint32_t normalCount = subChunkSize / 12;
                    for (uint32_t i = 0; i < normalCount && i < group.vertices.size(); i++) {
                        group.vertices[i].normal.x = read<float>(groupData, mogpOffset);
                        group.vertices[i].normal.y = read<float>(groupData, mogpOffset);
                        group.vertices[i].normal.z = read<float>(groupData, mogpOffset);
                    }
                }
                else if (subChunkId == 0x4D4F5456) { // MOTV - Texture coords
                    // Update texture coords for existing vertices
                    uint32_t texCoordCount = subChunkSize / 8;
                    core::Logger::getInstance().info("  MOTV: ", texCoordCount, " tex coords for ", group.vertices.size(), " vertices");
                    for (uint32_t i = 0; i < texCoordCount && i < group.vertices.size(); i++) {
                        group.vertices[i].texCoord.x = read<float>(groupData, mogpOffset);
                        group.vertices[i].texCoord.y = read<float>(groupData, mogpOffset);
                    }
                    if (texCoordCount > 0 && !group.vertices.empty()) {
                        core::Logger::getInstance().debug("    First UV: (", group.vertices[0].texCoord.x, ", ", group.vertices[0].texCoord.y, ")");
                    }
                }
                else if (subChunkId == 0x4D4F4356) { // MOCV - Vertex colors
                    // Update vertex colors (stored BGRA, one byte per channel)
                    uint32_t colorCount = subChunkSize / 4;
                    for (uint32_t i = 0; i < colorCount && i < group.vertices.size(); i++) {
                        uint8_t b = read<uint8_t>(groupData, mogpOffset);
                        uint8_t g = read<uint8_t>(groupData, mogpOffset);
                        uint8_t r = read<uint8_t>(groupData, mogpOffset);
                        uint8_t a = read<uint8_t>(groupData, mogpOffset);
                        group.vertices[i].color = glm::vec4(r/255.0f, g/255.0f, b/255.0f, a/255.0f);
                    }
                }
                else if (subChunkId == 0x4D4F4241) { // MOBA - Batches
                    // SMOBatch structure (24 bytes):
                    // - 6 x int16 bounding box (12 bytes)
                    // - uint32 startIndex (4 bytes)
                    // - uint16 count (2 bytes)
                    // - uint16 minIndex (2 bytes)
                    // - uint16 maxIndex (2 bytes)
                    // - uint8 flags (1 byte)
                    // - uint8 material_id (1 byte)
                    uint32_t batchCount = subChunkSize / 24;
                    for (uint32_t i = 0; i < batchCount; i++) {
                        WMOBatch batch;
                        mogpOffset += 12; // Skip bounding box (6 x int16 = 12 bytes)
                        batch.startIndex = read<uint32_t>(groupData, mogpOffset);
                        batch.indexCount = read<uint16_t>(groupData, mogpOffset);
                        // startVertex/lastVertex map to the spec's
                        // minIndex/maxIndex (first/last vertex referenced).
                        batch.startVertex = read<uint16_t>(groupData, mogpOffset);
                        batch.lastVertex = read<uint16_t>(groupData, mogpOffset);
                        batch.flags = read<uint8_t>(groupData, mogpOffset);
                        batch.materialId = read<uint8_t>(groupData, mogpOffset);
                        group.batches.push_back(batch);

                        static int batchLogCount = 0;
                        if (batchLogCount < 15) {
                            core::Logger::getInstance().info("  Batch[", i, "]: start=", batch.startIndex,
                                " count=", batch.indexCount, " verts=[", batch.startVertex, "-",
                                batch.lastVertex, "] mat=", (int)batch.materialId, " flags=", (int)batch.flags);
                            batchLogCount++;
                        }
                    }
                }

                mogpOffset = subChunkEnd;
            }
        }

        offset = chunkEnd;
    }

    // Create a default batch if none were loaded
    // NOTE(review): the uint16 cast truncates when a group has more than
    // 65535 indices — confirm WMOBatch::indexCount is wide enough.
    if (group.batches.empty() && !group.indices.empty()) {
        WMOBatch batch;
        batch.startIndex = 0;
        batch.indexCount = static_cast<uint16_t>(group.indices.size());
        batch.materialId = 0;
        group.batches.push_back(batch);
    }

    core::Logger::getInstance().info("WMO group ", groupIndex, " loaded: ",
        group.vertices.size(), " vertices, ",
        group.indices.size(), " indices, ",
        group.batches.size(), " batches");
    return !group.vertices.empty() && !group.indices.empty();
}
|
||||
|
||||
} // namespace pipeline
|
||||
} // namespace wowee
|
||||
Loading…
Add table
Add a link
Reference in a new issue