/* <2ae8e> ../engine/hashpak.c:1599 */ NOXREF char *HPAK_GetItem(int item) { NOXREFCHECK; int nCurrent; hash_pack_header_t header; hash_pack_directory_t directory; hash_pack_entry_t *entry; static char name[MAX_PATH]; char szFileName[MAX_PATH]; FileHandle_t fp; HPAK_FlushHostQueue(); Q_snprintf(name, ARRAYSIZE(name), "%s", "custom"); COM_DefaultExtension(name, HASHPAK_EXTENSION); fp = FS_Open(name, "rb"); if (!fp) return ""; FS_Read(&header, sizeof(hash_pack_header_t), 1, fp); if (Q_strncmp(header.szFileStamp, "HPAK", sizeof(header.szFileStamp))) { Con_Printf("%s is not an HPAK file\n", name); FS_Close(fp); return ""; } if (header.version != HASHPAK_VERSION) { Con_Printf("HPAK_List: version mismatch\n"); FS_Close(fp); return ""; } FS_Seek(fp, header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD); FS_Read(&directory.nEntries, 4, 1, fp); if (directory.nEntries < 1 || (unsigned int)directory.nEntries > MAX_FILE_ENTRIES) { Con_Printf("ERROR: HPAK had bogus # of directory entries: %i\n", directory.nEntries); FS_Close(fp); return ""; } directory.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * directory.nEntries); FS_Read(directory.p_rgEntries, sizeof(hash_pack_entry_t) * directory.nEntries, 1, fp); nCurrent = directory.nEntries - 1; if (nCurrent > item) nCurrent = item; entry = &directory.p_rgEntries[nCurrent]; COM_FileBase(entry->resource.szFileName, szFileName); Q_snprintf(name, sizeof(name), "!MD5%s", MD5_Print(entry->resource.rgucMD5_hash)); FS_Close(fp); Mem_Free(directory.p_rgEntries); return name; }
/*
 * ===========
 * AllocPortal
 * ===========
 *
 * Allocates a single portal, tracking the peak number of live portals
 * in debug mode via the counting semaphore.
 */
static portal_t *AllocPortal(void)
{
	if (!debug)
		return Mem_Malloc(sizeof(portal_t));

	// bump the live-portal count and record the high-water mark
	SDL_SemPost(semaphores.active_portals);

	const uint32_t live = SDL_SemValue(semaphores.active_portals);
	if (live > c_peak_portals)
		c_peak_portals = live;

	return Mem_Malloc(sizeof(portal_t));
}
/*
 * @brief Allocates a winding with storage for `points` points, erroring
 * out when the requested count exceeds MAX_POINTS_ON_WINDING.
 */
static winding_t *NewWinding(uint16_t points)
{
	if (points > MAX_POINTS_ON_WINDING)
		Com_Error(ERR_FATAL, "MAX_POINTS_ON_WINDING\n");

	// offsetof-style size computation: header plus `points` point slots
	const size_t size = (size_t) ((winding_t *) 0)->points[points];

	return Mem_Malloc(size);
}
/*
 * @brief Allocates a winding sized for `points` points, tracking the
 * peak number of live windings in debug mode.
 */
winding_t *AllocWinding(int32_t points)
{
	if (debug)
	{
		// bump the live-winding count and record the high-water mark
		SDL_SemPost(semaphores.active_windings);

		const uint32_t live = SDL_SemValue(semaphores.active_windings);
		if (live > c_peak_windings)
			c_peak_windings = live;
	}

	// header (num_points) plus the point array
	return Mem_Malloc(sizeof(int32_t) + sizeof(vec3_t) * points);
}
/**
 * @brief Resets the stainmaps that we have loaded, for level changes. This is kind of a
 * slow function, so be careful calling this one.
 *
 * For every stained surface, the stainmap texture is restored from the
 * (unstained) lightmap texture. Surfaces sharing a stainmap share one
 * lightmap readback, cached in a hash table keyed by stainmap pointer.
 */
void R_ResetStainmap(void) {
	// value-destroy is Mem_Free, so cached readback buffers are released
	// when the table is destroyed below
	GHashTable *hash = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, Mem_Free);

	for (uint32_t s = 0; s < r_model_state.world->bsp->num_surfaces; s++) {
		r_bsp_surface_t *surf = r_model_state.world->bsp->surfaces + s;

		// skip if we don't have a stainmap or we weren't stained
		if (!surf->stainmap || !surf->stainmap_dirty) {
			continue;
		}

		byte *lightmap = (byte *) g_hash_table_lookup(hash, surf->stainmap);

		if (!lightmap) {
			// first surface using this stainmap: read the clean lightmap
			// back from the GPU and upload it over the stainmap texture
			lightmap = Mem_Malloc(surf->lightmap->width * surf->lightmap->height * 3);

			R_BindDiffuseTexture(surf->lightmap->texnum);
			glGetTexImage(GL_TEXTURE_2D, 0, GL_RGB, GL_UNSIGNED_BYTE, lightmap);

			R_BindDiffuseTexture(surf->stainmap->texnum);
			glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, surf->lightmap->width, surf->lightmap->height, 0, GL_RGB, GL_UNSIGNED_BYTE, lightmap);

			g_hash_table_insert(hash, surf->stainmap, lightmap);
		}

		// also refresh this surface's CPU-side stain buffer from the
		// clean lightmap data, row by row
		const size_t offset = (surf->lightmap_t * surf->lightmap->width + surf->lightmap_s) * 3;
		const size_t stride = surf->lightmap->width * 3;

		byte *lm = lightmap + offset;
		byte *sm = surf->stainmap_buffer;

		for (int16_t t = 0; t < surf->lightmap_size[1]; t++) {

			memcpy(sm, lm, surf->lightmap_size[0] * 3);

			sm += surf->lightmap_size[0] * 3;
			lm += stride;
		}

		surf->stainmap_dirty = false;
	}

	g_hash_table_destroy(hash);
}
/*
 * @brief Initializes the threads backing the thread pool, capping the
 * count at MAX_THREADS. A zero count leaves the pool empty.
 */
static void Thread_Init_(uint16_t num_threads) {

	thread_pool.num_threads = MIN(num_threads, MAX_THREADS);

	if (!thread_pool.num_threads)
		return;

	thread_pool.threads = Mem_Malloc(sizeof(thread_t) * thread_pool.num_threads);

	for (uint16_t i = 0; i < thread_pool.num_threads; i++) {
		thread_t *worker = &thread_pool.threads[i];

		// each worker gets its own condition, mutex, and thread
		worker->cond = SDL_CreateCond();
		worker->mutex = SDL_CreateMutex();
		worker->thread = SDL_CreateThread(Thread_Run, worker);
	}
}
// Returns the library with the given base name, loading it (and caching it
// in m_Libraries) on first use. Returns nullptr on allocation, load, or
// factory-lookup failure.
SystemWrapper::library_t *SystemWrapper::GetLibrary(char *name)
{
	char fixedname[MAX_PATH];

	Q_strlcpy(fixedname, name);
	COM_FixSlashes(fixedname);

	// Return the library if it was already loaded.
	// NOTE(review): cached entries store the decorated name built below
	// ("<name>." LIBRARY_PREFIX) while `name` is the bare name, so this
	// comparison may never match -- confirm intent before changing.
	library_t *lib = (library_t *)m_Libraries.GetFirst();
	while (lib)
	{
		if (Q_stricmp(lib->name, name) == 0)
		{
			return lib;
		}

		lib = (library_t *)m_Libraries.GetNext();
	}

	lib = (library_t *)Mem_Malloc(sizeof(library_t));
	if (!lib)
	{
		DPrintf("ERROR! System::GetLibrary: out of memory (%s).\n", name);
		return nullptr;
	}

	// decorate with the platform library suffix and load the module
	Q_snprintf(lib->name, sizeof(lib->name), "%s." LIBRARY_PREFIX, fixedname);

	FS_GetLocalCopy(lib->name);

	lib->handle = (CSysModule *)Sys_LoadModule(lib->name);
	if (!lib->handle)
	{
		// BUGFIX: message typo "coulnd't" corrected
		DPrintf("ERROR! System::GetLibrary: couldn't load library (%s).\n", lib->name);
		Mem_Free(lib);
		return nullptr;
	}

	lib->createInterfaceFn = (CreateInterfaceFn)Sys_GetFactory(lib->handle);
	if (!lib->createInterfaceFn)
	{
		// BUGFIX: message typo "coulnd't" corrected
		DPrintf("ERROR! System::GetLibrary: couldn't get object factory(%s).\n", lib->name);
		Mem_Free(lib);
		return nullptr;
	}

	m_Libraries.Add(lib);

	DPrintf("Loaded library %s.\n", lib->name);

	return lib;
}
/**
 * @brief Recursively splits a patch along axis-aligned grid planes until
 * no dimension of its winding spans a PATCH_SUBDIVIDE boundary.
 */
static void SubdividePatch(patch_t *patch) {
	vec3_t mins, maxs, split;
	winding_t *front, *back;
	int32_t axis;

	WindingBounds(patch->winding, mins, maxs);

	// find the first axis that crosses a subdivision boundary
	VectorClear(split);
	for (axis = 0; axis < 3; axis++) {
		if (floor((mins[axis] + 1) / PATCH_SUBDIVIDE) < floor((maxs[axis] - 1) / PATCH_SUBDIVIDE)) {
			split[axis] = 1.0;
			break;
		}
	}

	if (axis == 3) { // fits within a single cell; no splitting needed
		return;
	}

	// clip the winding at the next grid plane along the chosen axis
	const vec_t dist = PATCH_SUBDIVIDE * (1 + floor((mins[axis] + 1) / PATCH_SUBDIVIDE));
	ClipWindingEpsilon(patch->winding, split, dist, ON_EPSILON, &front, &back);

	// link a new patch after this one and divide the winding between them
	patch_t *tail = (patch_t *) Mem_Malloc(sizeof(*tail));
	tail->next = patch->next;
	patch->next = tail;

	patch->winding = front;
	tail->winding = back;

	FinishSubdividePatch(patch, tail);

	// both halves may still straddle further boundaries
	SubdividePatch(patch);
	SubdividePatch(tail);
}
//
// kexInputKey::AddAction
//
// Registers a named key action: validates the name, allocates the action
// record, exposes it as a console command, and links it into the hash
// bucket chain for lookup.
//
void kexInputKey::AddAction(byte id, const char *name) {
    if(strlen(name) >= MAX_FILEPATH) {
        common.Error("Key_AddAction: \"%s\" is too long", name);
    }

    if(!command.Verify(name)) {
        return;
    }

    keyaction_t *action = (keyaction_t*)Mem_Malloc(sizeof(keyaction_t), hb_static);
    action->keyid = id;
    strcpy(action->name, name);

    command.Add(action->name, FCmd_KeyAction);

    // prepend to the bucket selected by the name's hash
    const unsigned int bucket = kexStr::Hash(action->name);
    action->next = keyactions[bucket];
    keyactions[bucket] = action;
}
char *Str_Dup( const char *s) {
	/*-------------------------------------------
	 Duplicate a string s by allocating memory for
	 it (including the terminator), copying s to
	 the new location and returning a pointer to
	 the new string.

	 Provides error handling by failing (Mem_Malloc
	 never returns NULL).

	 cwb - 9/13/01
	 -------------------------------------------*/
	const size_t nbytes = strlen(s) + 1;
	char *copy = (char *) Mem_Malloc(nbytes, "Str_Dup()");

	memcpy(copy, s, nbytes);

	return copy;
}
/* <d37d6> ../engine/net_ws.c:695 */
// Links pPacket at the tail of the circular lag list headed by pList and
// stores a private copy of the message payload plus its metadata in it.
// Refuses packets that are already linked.
void NET_AddToLagged(netsrc_t sock, packetlag_t *pList, packetlag_t *pPacket, netadr_t *net_from_, sizebuf_t messagedata, float timestamp)
{
	if (pPacket->pPrev || pPacket->pNext)
	{
		Con_Printf("Packet already linked\n");
		return;
	}

	// splice into the circular list, just before the head sentinel
	pPacket->pPrev = pList->pPrev;
	pList->pPrev->pNext = pPacket;
	pList->pPrev = pPacket;
	pPacket->pNext = pList;

	// take a private copy of the payload; the caller's buffer is reused
	unsigned char *pCopy = (unsigned char *)Mem_Malloc(messagedata.cursize);
	Q_memcpy(pCopy, messagedata.data, messagedata.cursize);

	pPacket->pPacketData = pCopy;
	pPacket->nSize = messagedata.cursize;
	pPacket->receivedTime = timestamp;
	Q_memcpy(&pPacket->net_from_, net_from_, sizeof(netadr_t));
}
void *Mem_Calloc( size_t nobjs, size_t size, const char *funcname) {
	/*-------------------------------------------
	 Duplicates the behavior of calloc() similar
	 to Mem_Malloc: allocates nobjs * size bytes
	 and zero-fills them.

	 cwb - 8/10/01
	 -------------------------------------------*/
	void *p;
	size_t total;

	/* BUGFIX: guard against nobjs * size overflowing, which would
	 * silently allocate a too-small block; calloc() itself performs
	 * this check.  On overflow, request the maximum size so that
	 * Mem_Malloc's error handling (it fails on unsatisfiable
	 * requests) reports the problem instead. */
	if (size != 0 && nobjs > (size_t)-1 / size)
		total = (size_t)-1;
	else
		total = nobjs * size;

	p = Mem_Malloc( total, funcname);

#ifndef DEBUG_MEM
	/* if using mcguire's code, no need to memset after
	 * malloc() as it is done there. */
	Mem_Set(p, 0, total);
#endif

	return p;
}
/** * @brief Adds the specified server to the master. */ static void Ms_AddServer(struct sockaddr_in *from) { if (Ms_GetServer(from)) { Com_Warn("Duplicate ping from %s\n", atos(from)); return; } if (Ms_BlacklistServer(from)) { Com_Warn("Server %s has been blacklisted\n", atos(from)); return; } ms_server_t *server = Mem_Malloc(sizeof(ms_server_t)); server->addr = *from; server->last_heartbeat = time(NULL); ms_servers = g_list_prepend(ms_servers, server); Com_Print("Server %s registered\n", stos(server)); // send an acknowledgment sendto(ms_sock, "\xFF\xFF\xFF\xFF" "ack", 7, 0, (struct sockaddr *) from, sizeof(*from)); }
/* * @brief Captures a screenshot, writing it to the user's directory. */ void R_Screenshot_f(void) { static uint16_t last_screenshot; char filename[MAX_OS_PATH]; uint16_t i; // find a file name to save it to for (i = last_screenshot; i < MAX_SCREENSHOTS; i++) { g_snprintf(filename, sizeof(filename), "screenshots/quetoo%03u.jpg", i); if (!Fs_Exists(filename)) break; // file doesn't exist } if (i == MAX_SCREENSHOTS) { Com_Warn("Failed to create %s\n", filename); return; } last_screenshot = i; // save for next call const uint32_t width = r_context.width; const uint32_t height = r_context.height; byte *buffer = Mem_Malloc(width * height * 3); glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, buffer); const int32_t quality = Clamp(r_screenshot_quality->integer, 0, 100); if (Img_WriteJPEG(filename, buffer, width, height, quality)) { Com_Print("Saved %s\n", Basename(filename)); } else { Com_Warn("Failed to write %s\n", filename); } Mem_Free(buffer); }
/* <2abc5> ../engine/hashpak.c:855 */
// Looks up a resource by MD5 hash, first in the pending in-memory queue
// and then in the named hashpak's on-disk directory. On success,
// optionally copies the matching resource descriptor into pResourceEntry
// (which may be NULL) and returns TRUE.
qboolean HPAK_ResourceForHash(char *pakname, unsigned char *hash, struct resource_s *pResourceEntry)
{
	qboolean bFound;
	hash_pack_header_t header;
	hash_pack_directory_t directory;
	char name[MAX_PATH];
	FileHandle_t fp;

	// check lumps queued for addition but not yet flushed to disk
	if (gp_hpak_queue)
	{
		for (hash_pack_queue_t *p = gp_hpak_queue; p != NULL; p = p->next)
		{
			if (Q_stricmp(p->pakname, pakname) != 0 || Q_memcmp(p->resource.rgucMD5_hash, hash, 16) != 0)
				continue;

			if (pResourceEntry)
				Q_memcpy(pResourceEntry, &p->resource, sizeof(resource_t));

			return TRUE;
		}
	}

	Q_snprintf(name, ARRAYSIZE(name), "%s", pakname);
#ifdef REHLDS_FIXES
	name[ARRAYSIZE(name) - 1] = 0; // ensure termination
#endif // REHLDS_FIXES
	COM_DefaultExtension(name, HASHPAK_EXTENSION);

	fp = FS_Open(name, "rb");
	if (!fp)
	{
		Con_Printf("ERROR: couldn't open %s.\n", name);
		return FALSE;
	}

	// validate the pak header before trusting the directory offset
	FS_Read(&header, sizeof(hash_pack_header_t), 1, fp);
	if (Q_strncmp(header.szFileStamp, "HPAK", sizeof(header.szFileStamp)))
	{
		Con_Printf("%s is not an HPAK file\n", name);
		FS_Close(fp);
		return FALSE;
	}
	if (header.version != HASHPAK_VERSION)
	{
		Con_Printf("HPAK_List: version mismatch\n");
		FS_Close(fp);
		return FALSE;
	}

	// load the directory, sanity-checking the entry count first
	FS_Seek(fp, header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD);
	FS_Read(&directory.nEntries, 4, 1, fp);
	if (directory.nEntries < 1 || (unsigned int)directory.nEntries > MAX_FILE_ENTRIES)
	{
		Con_Printf("ERROR: HPAK had bogus # of directory entries: %i\n", directory.nEntries);
		FS_Close(fp);
		return FALSE;
	}

	directory.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * directory.nEntries);
	FS_Read(directory.p_rgEntries, sizeof(hash_pack_entry_t) * directory.nEntries, 1, fp);

	bFound = HPAK_FindResource(&directory, hash, pResourceEntry);

	FS_Close(fp);
	Mem_Free(directory.p_rgEntries);

	return bFound;
}
/* <2a7cb> ../engine/hashpak.c:65 */ qboolean HPAK_GetDataPointer(char *pakname, struct resource_s *pResource, unsigned char **pbuffer, int *bufsize) { qboolean retval = FALSE; FileHandle_t fp; hash_pack_header_t header; hash_pack_directory_t directory; hash_pack_entry_t *entry; char name[MAX_PATH]; byte *pbuf; if (pbuffer) *pbuffer = NULL; if (bufsize) *bufsize = 0; if (gp_hpak_queue) { for (hash_pack_queue_t *p = gp_hpak_queue; p != NULL; p = p->next) { if (Q_stricmp(p->pakname, pakname) != 0 || Q_memcmp(p->resource.rgucMD5_hash, pResource->rgucMD5_hash, 16) != 0) continue; if (pbuffer) { pbuf = (byte *)Mem_Malloc(p->datasize); if (!pbuf) Sys_Error("Error allocating %i bytes for hpak!", p->datasize); Q_memcpy((void *)pbuf, p->data, p->datasize); *pbuffer = pbuf; } if (bufsize) *bufsize = p->datasize; return TRUE; } } Q_snprintf(name, ARRAYSIZE(name), "%s", pakname); #ifdef REHLDS_FIXES name[ARRAYSIZE(name) - 1] = 0; #endif // REHLDS_FIXES COM_DefaultExtension(name, HASHPAK_EXTENSION); fp = FS_Open(name, "rb"); if (!fp) { return FALSE; } FS_Read(&header, sizeof(hash_pack_header_t), 1, fp); if (Q_strncmp(header.szFileStamp, "HPAK", sizeof(header.szFileStamp)) != 0) { Con_Printf("%s is not an HPAK file\n", name); FS_Close(fp); return FALSE; } if (header.version != HASHPAK_VERSION) { Con_Printf("HPAK_List: version mismatch\n"); FS_Close(fp); return FALSE; } FS_Seek(fp, header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD); FS_Read(&directory.nEntries, 4, 1, fp); if (directory.nEntries < 1 || (unsigned int)directory.nEntries > MAX_FILE_ENTRIES) { Con_Printf("ERROR: HPAK had bogus # of directory entries: %i\n", directory.nEntries); FS_Close(fp); return FALSE; } directory.p_rgEntries = (hash_pack_entry_t *)Mem_ZeroMalloc(sizeof(hash_pack_entry_t) * directory.nEntries); FS_Read(directory.p_rgEntries, sizeof(hash_pack_entry_t) * directory.nEntries, 1, fp); for (int i = 0; i < directory.nEntries; i++) { entry = &directory.p_rgEntries[i]; if (Q_memcmp(entry->resource.rgucMD5_hash, 
pResource->rgucMD5_hash, 16) != 0) continue; retval = TRUE; FS_Seek(fp, entry->nOffset, FILESYSTEM_SEEK_HEAD); if (pbuffer && entry->nFileLength > 0) { if (bufsize) *bufsize = entry->nFileLength; pbuf = (byte *)Mem_Malloc(entry->nFileLength); if (!pbuf) { Con_Printf("Couln't allocate %i bytes for HPAK entry\n", entry->nFileLength); if (bufsize) *bufsize = 0; retval = FALSE; } FS_Read(pbuf, entry->nFileLength, 1, fp); *pbuffer = pbuf; } break; } Mem_Free(directory.p_rgEntries); FS_Close(fp); return retval; }
/* <2a974> ../engine/hashpak.c:598 */
// Removes the lump matching pResource's MD5 hash from the named hashpak.
// The pak is rewritten into a temporary ".hp2" file with the entry's data
// and directory record omitted, then renamed over the original. Removing
// the last remaining lump deletes the pak file entirely.
void HPAK_RemoveLump(char *pakname, resource_t *pResource)
{
	FileHandle_t fp;
	FileHandle_t tmp;
	char szTempName[MAX_PATH];
	char szOriginalName[MAX_PATH];
	hash_pack_directory_t olddir;
	hash_pack_directory_t newdir;
	hash_pack_entry_t *oldentry;
	hash_pack_entry_t *newentry;
	int n;
	int i;

	if (pakname == NULL || *pakname == '\0' || pResource == NULL)
	{
		Con_Printf(__FUNCTION__ ": Invalid arguments\n");
		return;
	}

	// make sure queued lumps are on disk before rewriting the pak
	HPAK_FlushHostQueue();

#ifdef REHLDS_FIXES
	Q_strncpy(szOriginalName, pakname, ARRAYSIZE(szOriginalName) - 1);
	szOriginalName[ARRAYSIZE(szOriginalName) - 1] = 0;
	COM_DefaultExtension(szOriginalName, HASHPAK_EXTENSION);
#else
	//TODO: Not sure why Cmd_Argv(1) is used since function receives pakname parameter
	char name[MAX_PATH];
	Q_snprintf(name, ARRAYSIZE(name), "%s", Cmd_Argv(1));
	COM_DefaultExtension(name, HASHPAK_EXTENSION);
	Q_strncpy(szOriginalName, name, ARRAYSIZE(szOriginalName) - 1);
	szOriginalName[ARRAYSIZE(szOriginalName) - 1] = 0;
#endif // REHLDS_FIXES

	fp = FS_Open(szOriginalName, "rb");
	if (!fp)
	{
		Con_Printf("Error: couldn't open HPAK file %s for removal.\n", szOriginalName);
		return;
	}

	// rewrite into a sibling ".hp2" temp file
	COM_StripExtension(szOriginalName, szTempName);
	COM_DefaultExtension(szTempName, ".hp2");

	tmp = FS_Open(szTempName, "w+b");
	if (!tmp)
	{
		FS_Close(fp);
		Con_Printf("ERROR: couldn't create %s.\n", szTempName);
		return;
	}

	FS_Seek(fp, 0, FILESYSTEM_SEEK_HEAD);
	FS_Seek(tmp, 0, FILESYSTEM_SEEK_HEAD);

	// copy the header; its directory offset is patched at the end
	FS_Read(&hash_pack_header, sizeof(hash_pack_header_t), 1, fp);
	FS_Write(&hash_pack_header, sizeof(hash_pack_header_t), 1, tmp);

	if (Q_strncmp(hash_pack_header.szFileStamp, "HPAK", sizeof(hash_pack_header.szFileStamp)))
	{
		FS_Close(fp);
		FS_Close(tmp);
		FS_Unlink(szTempName);
		Con_Printf("%s is not an HPAK file\n", szOriginalName);
		return;
	}

	if (hash_pack_header.version != HASHPAK_VERSION)
	{
		FS_Close(fp);
		FS_Close(tmp);
		FS_Unlink(szTempName);
		Con_Printf("ERROR: HPAK version outdated\n");
		return;
	}

	// load the old directory, sanity-checking the entry count
	FS_Seek(fp, hash_pack_header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD);
	FS_Read(&olddir.nEntries, 4, 1, fp);
	if (olddir.nEntries < 1 || (unsigned int)olddir.nEntries > MAX_FILE_ENTRIES)
	{
		FS_Close(fp);
		FS_Close(tmp);
		FS_Unlink(szTempName);
		Con_Printf("ERROR: HPAK had bogus # of directory entries: %i\n", olddir.nEntries);
		return;
	}

	// removing the only lump leaves an empty pak: delete the file instead
	if (olddir.nEntries == 1)
	{
		FS_Close(fp);
		FS_Close(tmp);
		FS_Unlink(szOriginalName);
		FS_Unlink(szTempName);
		Con_Printf("Removing final lump from HPAK, deleting HPAK:\n  %s\n", szOriginalName);
		return;
	}

	olddir.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * olddir.nEntries);
	FS_Read(olddir.p_rgEntries, sizeof(hash_pack_entry_t) * olddir.nEntries, 1, fp);

	// new directory holds one entry fewer
	newdir.nEntries = olddir.nEntries - 1;
	newdir.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * newdir.nEntries);

	if (!HPAK_FindResource(&olddir, pResource->rgucMD5_hash, NULL))
	{
		FS_Close(fp);
		FS_Close(tmp);
		FS_Unlink(szTempName);
		Mem_Free(olddir.p_rgEntries);
		Mem_Free(newdir.p_rgEntries);
		Con_Printf("ERROR: HPAK doesn't contain specified lump: %s\n", pResource->szFileName);
		return;
	}

	Con_Printf("Removing %s from HPAK %s.\n", pResource->szFileName, szOriginalName);

	// copy every entry except the one being removed, recording each
	// lump's new data offset in the temp file as we go
	for (i = 0, n = 0; i < olddir.nEntries; i++)
	{
		oldentry = &olddir.p_rgEntries[i];
		if (Q_memcmp(olddir.p_rgEntries[i].resource.rgucMD5_hash, pResource->rgucMD5_hash, 16))
		{
			newentry = &newdir.p_rgEntries[n++];
			Q_memcpy(newentry, oldentry, sizeof(hash_pack_entry_t));
			newentry->nOffset = FS_Tell(tmp);
			FS_Seek(fp, oldentry->nOffset, FILESYSTEM_SEEK_HEAD);
			COM_CopyFileChunk(tmp, fp, newentry->nFileLength);
		}
	}

	// append the new directory and patch the header's directory offset
	hash_pack_header.nDirectoryOffset = FS_Tell(tmp);
	FS_Write(&newdir.nEntries, 4, 1, tmp);

	for (i = 0; i < newdir.nEntries; i++)
		FS_Write(&newdir.p_rgEntries[i], sizeof(hash_pack_entry_t), 1, tmp);

	FS_Seek(tmp, 0, FILESYSTEM_SEEK_HEAD);
	FS_Write(&hash_pack_header, sizeof(hash_pack_header_t), 1, tmp);

	FS_Close(fp);
	FS_Close(tmp);

	// atomically-ish replace the original pak with the rewritten one
	FS_Unlink(szOriginalName);
	FS_Rename(szTempName, szOriginalName);

	Mem_Free(olddir.p_rgEntries);
	Mem_Free(newdir.p_rgEntries);
}
/* <2a494> ../engine/hashpak.c:322 */
// Adds a data lump (from memory via pData, or from an open file via
// fpSource) to the named hashpak. The lump's MD5 is verified against
// pResource before anything is written. When bUseQueue is set, the lump
// is queued in memory instead of written immediately. The pak is rewritten
// through a ".hp2" temp file with the new entry inserted into the
// hash-sorted directory, then renamed over the original.
void HPAK_AddLump(qboolean bUseQueue, char *pakname, struct resource_s *pResource, void *pData, FileHandle_t fpSource)
{
	FileHandle_t iRead;
	FileHandle_t iWrite;
	char name[MAX_PATH];
	char szTempName[MAX_PATH];
	char szOriginalName[MAX_PATH];
	hash_pack_directory_t olddirectory;
	hash_pack_directory_t newdirectory;
	hash_pack_entry_t *pNewEntry;
	byte md5[16];
	MD5Context_t ctx;
	byte *pDiskData;

	if (pakname == NULL)
	{
		Con_Printf("HPAK_AddLump called with invalid arguments:  no .pak filename\n");
		return;
	}

	if (!pResource)
	{
		Con_Printf("HPAK_AddLump called with invalid arguments:  no lump to add\n");
		return;
	}

	if (!pData && !fpSource)
	{
		Con_Printf("HPAK_AddLump called with invalid arguments:  no file handle\n");
		return;
	}

	// lump size must be plausible before we touch the pak
	if (pResource->nDownloadSize < 1024 || (unsigned int)pResource->nDownloadSize > MAX_FILE_SIZE)
	{
		Con_Printf("HPAK_AddLump called with bogus lump, size: %i\n", pResource->nDownloadSize);
		return;
	}

	// hash the payload (from memory or from disk) and verify it matches
	// the hash claimed by the resource descriptor
	Q_memset(&ctx, 0, sizeof(MD5Context_t));
	MD5Init(&ctx);
	if (pData)
		MD5Update(&ctx, (byte *)pData, pResource->nDownloadSize);
	else
	{
		pDiskData = (byte *)Mem_Malloc(pResource->nDownloadSize + 1);
		Q_memset(pDiskData, 0, pResource->nDownloadSize);
		FS_Read(pDiskData, pResource->nDownloadSize, 1, fpSource);
		FS_Seek(fpSource, FS_Tell(fpSource), FILESYSTEM_SEEK_HEAD);
		MD5Update(&ctx, pDiskData, pResource->nDownloadSize);
		Mem_Free(pDiskData);
	}
	MD5Final(md5, &ctx);

	if (Q_memcmp(pResource->rgucMD5_hash, md5, sizeof(md5)) != 0)
	{
		Con_Printf("HPAK_AddLump called with bogus lump, md5 mismatch\n");
		Con_Printf("Purported:  %s\n", MD5_Print(pResource->rgucMD5_hash));
		Con_Printf("Actual   :  %s\n", MD5_Print(md5));
		Con_Printf("Ignoring lump addition\n");
		return;
	}

	if (bUseQueue)
	{
		HPAK_AddToQueue(pakname, pResource, pData, fpSource);
		return;
	}

	Q_snprintf(name, ARRAYSIZE(name), "%s", pakname);
#ifdef REHLDS_FIXES
	name[ARRAYSIZE(name) - 1] = 0; // ensure termination
#endif // REHLDS_FIXES
	COM_DefaultExtension(name, HASHPAK_EXTENSION);
	COM_FixSlashes(name);

	Q_strncpy(szOriginalName, name, ARRAYSIZE(szOriginalName) - 1);
	szOriginalName[ARRAYSIZE(szOriginalName) - 1] = 0;

	iRead = FS_Open(name, "rb");
	if (!iRead)
	{
		// no existing pak: create a fresh one with just this lump
		HPAK_CreatePak(pakname, pResource, pData, fpSource);
		return;
	}

	// rewrite through a sibling ".hp2" temp file
	COM_StripExtension(name, szTempName);
	COM_DefaultExtension(szTempName, ".hp2");

	iWrite = FS_Open(szTempName, "w+b");
	if (!iWrite)
	{
		FS_Close(iRead);
		Con_Printf("ERROR: couldn't open %s.\n", szTempName);
		return;
	}

	FS_Read(&hash_pack_header, sizeof(hash_pack_header_t), 1, iRead);
	if (hash_pack_header.version != HASHPAK_VERSION)
	{
		FS_Close(iRead);
		FS_Close(iWrite);
		FS_Unlink(szTempName);
		Con_Printf("Invalid .hpk version in HPAK_AddLump\n");
		return;
	}

	// copy the whole original pak; the tail (old directory) is later
	// overwritten by the new lump data and directory
	FS_Seek(iRead, 0, FILESYSTEM_SEEK_HEAD);
	COM_CopyFileChunk(iWrite, iRead, FS_Size(iRead));

	FS_Seek(iRead, hash_pack_header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD);
	FS_Read(&olddirectory.nEntries, 4, 1, iRead);
	if (olddirectory.nEntries < 1 || (unsigned int)olddirectory.nEntries > MAX_FILE_ENTRIES)
	{
		FS_Close(iRead);
		FS_Close(iWrite);
		FS_Unlink(szTempName);
		Con_Printf("ERROR: .hpk had bogus # of directory entries:  %i\n", olddirectory.nEntries);
		return;
	}

	olddirectory.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * olddirectory.nEntries);
	FS_Read(olddirectory.p_rgEntries, sizeof(hash_pack_entry_t) * olddirectory.nEntries, 1, iRead);
	FS_Close(iRead);

	// nothing to do if the lump is already present
	if (HPAK_FindResource(&olddirectory, pResource->rgucMD5_hash, NULL) != FALSE)
	{
		FS_Close(iWrite);
		FS_Unlink(szTempName);
		Mem_Free(olddirectory.p_rgEntries);
		return;
	}

	newdirectory.nEntries = olddirectory.nEntries + 1;
	newdirectory.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * newdirectory.nEntries);

	Q_memset(newdirectory.p_rgEntries, 0, sizeof(hash_pack_entry_t) * newdirectory.nEntries);
	Q_memcpy(newdirectory.p_rgEntries, olddirectory.p_rgEntries, sizeof(hash_pack_entry_t) * olddirectory.nEntries);

	// find the insertion point that keeps the directory hash-sorted,
	// shifting the remaining entries one slot to the right
	pNewEntry = NULL;
	for (int i = 0; i < olddirectory.nEntries; i++)
	{
		if (Q_memcmp(pResource->rgucMD5_hash, olddirectory.p_rgEntries[i].resource.rgucMD5_hash, 16) >= 0)
		{
			pNewEntry = &newdirectory.p_rgEntries[i];
#ifndef REHLDS_FIXES
			while (i < olddirectory.nEntries)
			{
				Q_memcpy(&newdirectory.p_rgEntries[i + 1], &olddirectory.p_rgEntries[i], sizeof(hash_pack_entry_t));
				i++;
			}
#else
			// single bulk move instead of the element-by-element loop
			Q_memcpy(&newdirectory.p_rgEntries[i + 1], &olddirectory.p_rgEntries[i], (olddirectory.nEntries - i) * sizeof(hash_pack_entry_t));
#endif
			break;
		}
	}

	if (pNewEntry == NULL)
	{
		// new hash sorts after all existing entries: append at the end
		pNewEntry = &newdirectory.p_rgEntries[newdirectory.nEntries - 1];
	}

	Q_memset(pNewEntry, 0, sizeof(hash_pack_entry_t));

	// append the new lump data where the old directory used to start
	FS_Seek(iWrite, hash_pack_header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD);
	Q_memcpy(&pNewEntry->resource, pResource, sizeof(resource_t));
	pNewEntry->nOffset = FS_Tell(iWrite);
	pNewEntry->nFileLength = pResource->nDownloadSize;

	if (pData)
		FS_Write(pData, pResource->nDownloadSize, 1, iWrite);
	else
		COM_CopyFileChunk(iWrite, fpSource, pResource->nDownloadSize);

	// write the new directory and patch the header's directory offset
	hash_pack_header.nDirectoryOffset = FS_Tell(iWrite);
	FS_Write(&newdirectory.nEntries, 4, 1, iWrite);

	for (int j = 0; j < newdirectory.nEntries; j++)
		FS_Write(&newdirectory.p_rgEntries[j], sizeof(hash_pack_entry_t), 1, iWrite);

	if (newdirectory.p_rgEntries)
		Mem_Free(newdirectory.p_rgEntries);

	if (olddirectory.p_rgEntries)
		Mem_Free(olddirectory.p_rgEntries);

	FS_Seek(iWrite, 0, FILESYSTEM_SEEK_HEAD);
	FS_Write(&hash_pack_header, sizeof(hash_pack_header_t), 1, iWrite);
	FS_Close(iWrite);

	// replace the original pak with the rewritten one
	FS_Unlink(szOriginalName);
	FS_Rename(szTempName, szOriginalName);
}
/* <2afb5> ../engine/hashpak.c:1728 */
// Validates an entire hashpak file: header stamp/version, directory entry
// count, and the MD5 hash of every data lump against its directory record.
// Any failure deletes the pak file outright.
void HPAK_ValidatePak(char *fullpakname)
{
	hash_pack_header_t header;
	hash_pack_directory_t directory;
	hash_pack_entry_t *entry;
	char szFileName[MAX_PATH];
	FileHandle_t fp;
	byte *pData;
	byte md5[16];
	MD5Context_t ctx;

	// make sure queued lumps are on disk before validating
	HPAK_FlushHostQueue();

	fp = FS_Open(fullpakname, "rb");
	if (!fp)
		return;

	FS_Read(&header, sizeof(hash_pack_header_t), 1, fp);

	if (header.version != HASHPAK_VERSION || Q_strncmp(header.szFileStamp, "HPAK", sizeof(header.szFileStamp)) != 0)
	{
		Con_Printf("%s is not a PAK file, deleting\n", fullpakname);
		FS_Close(fp);
		FS_RemoveFile(fullpakname, 0);
		return;
	}

	// read the entry count (first directory field) and sanity-check it
	FS_Seek(fp, header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD);
	FS_Read(&directory, 4, 1, fp);
	if (directory.nEntries < 1 || (unsigned int)directory.nEntries > MAX_FILE_ENTRIES)
	{
		Con_Printf("ERROR: HPAK %s had bogus # of directory entries: %i, deleting\n", fullpakname, directory.nEntries);
		FS_Close(fp);
		FS_RemoveFile(fullpakname, 0);
		return;
	}

	directory.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * directory.nEntries);
	FS_Read(directory.p_rgEntries, sizeof(hash_pack_entry_t) * directory.nEntries, 1, fp);

	// re-hash every lump and compare against its directory record
	for (int nCurrent = 0; nCurrent < directory.nEntries; nCurrent++)
	{
		entry = &directory.p_rgEntries[nCurrent];
		COM_FileBase(entry->resource.szFileName, szFileName);

		if ((unsigned int)entry->nFileLength >= MAX_FILE_SIZE)
		{
			Con_Printf("Mismatched data in HPAK file %s, deleting\n", fullpakname);
			Con_Printf("Unable to MD5 hash data lump %i, size invalid:  %i\n", nCurrent + 1, entry->nFileLength);
			FS_Close(fp);
			FS_RemoveFile(fullpakname, 0);
			Mem_Free(directory.p_rgEntries);
			return;
		}

		pData = (byte *)Mem_Malloc(entry->nFileLength + 1);
		Q_memset(pData, 0, entry->nFileLength);

		FS_Seek(fp, entry->nOffset, FILESYSTEM_SEEK_HEAD);
		FS_Read(pData, entry->nFileLength, 1, fp);

		Q_memset(&ctx, 0, sizeof(MD5Context_t));
		MD5Init(&ctx);
		MD5Update(&ctx, pData, entry->nFileLength);
		MD5Final(md5, &ctx);

		if (pData)
			Mem_Free(pData);

		if (Q_memcmp(entry->resource.rgucMD5_hash, md5, sizeof(md5)) != 0)
		{
			Con_Printf("Mismatched data in HPAK file %s, deleting\n", fullpakname);
			FS_Close(fp);
			FS_RemoveFile(fullpakname, 0);
			Mem_Free(directory.p_rgEntries);
			return;
		}
	}

	FS_Close(fp);
	Mem_Free(directory.p_rgEntries);
}
/*
 * @brief Parses a PRT (portal) file, allocating and populating map_vis:
 * two memory portals (forward and backward) per file portal, the leaf
 * array, and the uncompressed visibility buffer.
 */
static void LoadPortals(const char *filename) {
	uint32_t i;
	portal_t *p;
	leaf_t *l;
	char magic[80];
	char *buffer, *s;
	int32_t len;
	int32_t num_points;
	winding_t *w;
	int32_t leaf_nums[2];
	plane_t plane;

	if (Fs_Load(filename, (void **) &buffer) == -1)
		Com_Error(ERR_FATAL, "Could not open %s\n", filename);

	s = buffer;

	memset(&map_vis, 0, sizeof(map_vis));

	// header: magic, cluster count, portal count
	if (sscanf(s, "%79s\n%u\n%u\n%n", magic, &map_vis.portal_clusters, &map_vis.num_portals, &len) != 3)
		Com_Error(ERR_FATAL, "Failed to read header: %s\n", filename);
	s += len;

	if (g_strcmp0(magic, PORTALFILE))
		Com_Error(ERR_FATAL, "Not a portal file: %s\n", filename);

	Com_Verbose("Loading %4u portals, %4u clusters from %s...\n", map_vis.num_portals, map_vis.portal_clusters, filename);

	// these counts should take advantage of 64 bit systems automatically
	map_vis.leaf_bytes = ((map_vis.portal_clusters + 63) & ~63) >> 3;
	map_vis.leaf_longs = map_vis.leaf_bytes / sizeof(long);

	map_vis.portal_bytes = ((map_vis.num_portals * 2 + 63) & ~63) >> 3;
	map_vis.portal_longs = map_vis.portal_bytes / sizeof(long);

	// each file portal is split into two memory portals
	map_vis.portals = Mem_Malloc(2 * map_vis.num_portals * sizeof(portal_t));

	// allocate the leafs
	map_vis.leafs = Mem_Malloc(map_vis.portal_clusters * sizeof(leaf_t));

	map_vis.uncompressed_size = map_vis.portal_clusters * map_vis.leaf_bytes;
	map_vis.uncompressed = Mem_Malloc(map_vis.uncompressed_size);

	// output cursor starts just past the per-cluster bit offsets
	map_vis.base = map_vis.pointer = d_bsp.vis_data;
	d_vis->num_clusters = map_vis.portal_clusters;
	map_vis.pointer = (byte *) &d_vis->bit_offsets[map_vis.portal_clusters];

	map_vis.end = map_vis.base + MAX_BSP_VISIBILITY;

	for (i = 0, p = map_vis.portals; i < map_vis.num_portals; i++) {
		int32_t j;

		// portal record: point count and the two leafs it connects
		if (sscanf(s, "%i %i %i %n", &num_points, &leaf_nums[0], &leaf_nums[1], &len) != 3) {
			Com_Error(ERR_FATAL, "Failed to read portal %i\n", i);
		}
		s += len;

		if (num_points > MAX_POINTS_ON_WINDING) {
			Com_Error(ERR_FATAL, "Portal %i has too many points\n", i);
		}

		if ((uint32_t) leaf_nums[0] > map_vis.portal_clusters || (uint32_t) leaf_nums[1] > map_vis.portal_clusters) {
			Com_Error(ERR_FATAL, "Portal %i has invalid leafs\n", i);
		}

		w = p->winding = NewWinding(num_points);
		w->original = true;
		w->num_points = num_points;

		for (j = 0; j < num_points; j++) {
			double v[3];
			int32_t k;

			// scanf into double, then assign to vec_t
			// so we don't care what size vec_t is
			if (sscanf(s, "(%lf %lf %lf ) %n", &v[0], &v[1], &v[2], &len) != 3)
				Com_Error(ERR_FATAL, "Failed to read portal vertex definition %i:%i\n", i, j);
			s += len;

			for (k = 0; k < 3; k++)
				w->points[j][k] = v[k];
		}
		// consume the trailing newline, if any
		if (sscanf(s, "\n%n", &len)) {
			s += len;
		}

		// calc plane
		PlaneFromWinding(w, &plane);

		// create forward portal
		l = &map_vis.leafs[leaf_nums[0]];
		if (l->num_portals == MAX_PORTALS_ON_LEAF)
			Com_Error(ERR_FATAL, "MAX_PORTALS_ON_LEAF\n");
		l->portals[l->num_portals] = p;
		l->num_portals++;

		// forward portal faces away from leaf 0, into leaf 1
		p->winding = w;
		VectorSubtract(vec3_origin, plane.normal, p->plane.normal);
		p->plane.dist = -plane.dist;
		p->leaf = leaf_nums[1];
		SetPortalSphere(p);
		p++;

		// create backwards portal
		l = &map_vis.leafs[leaf_nums[1]];
		if (l->num_portals == MAX_PORTALS_ON_LEAF)
			Com_Error(ERR_FATAL, "MAX_PORTALS_ON_LEAF\n");
		l->portals[l->num_portals] = p;
		l->num_portals++;

		// backward portal reuses the points in reverse order
		p->winding = NewWinding(w->num_points);
		p->winding->num_points = w->num_points;
		for (j = 0; j < w->num_points; j++) {
			VectorCopy(w->points[w->num_points - 1 - j], p->winding->points[j]);
		}

		p->plane = plane;
		p->leaf = leaf_nums[0];
		SetPortalSphere(p);
		p++;
	}

	Fs_Free(buffer);
}
// Reassembles a completed file transfer from the channel's file-stream
// fragment list. The first fragment carries the file name, the compressor
// tag ("bz2" or "uncompressed") and the uncompressed size; the remaining
// fragment payloads are concatenated, optionally bz2-decompressed, and
// either stashed in chan->tempbuffer (names starting with '!') or written
// to disk under the GAMEDOWNLOAD path. Returns TRUE on success.
qboolean Netchan_CopyFileFragments(netchan_t *chan)
{
	fragbuf_t *p;
	int nsize;
	unsigned char *buffer;
	int pos;
	signed int cursize;
	char filename[MAX_PATH];
	char compressor[32];
	fragbuf_s *n;
	qboolean bCompressed;
	unsigned int uncompressedSize;

	if (!chan->incomingready[FRAG_FILE_STREAM])
		return FALSE;

	p = chan->incomingbufs[FRAG_FILE_STREAM];
	if (!p)
	{
		Con_Printf("%s:  Called with no fragments readied\n", __func__);
		chan->incomingready[FRAG_FILE_STREAM] = FALSE;
		return FALSE;
	}

	bCompressed = FALSE;

	SZ_Clear(&net_message);
	MSG_BeginReading();
	SZ_Write(&net_message, p->frag_message.data, p->frag_message.cursize);

	// parse the transfer header from the first fragment
	Q_strncpy(filename, MSG_ReadString(), sizeof(filename) - 1);
	filename[sizeof(filename) - 1] = 0;

	Q_strncpy(compressor, MSG_ReadString(), sizeof(compressor) - 1);
	compressor[sizeof(compressor) - 1] = 0;
	if (!Q_stricmp(compressor, "bz2"))
		bCompressed = TRUE;

	uncompressedSize = (unsigned int)MSG_ReadLong();

#ifdef REHLDS_FIXES
	// TODO: this condition is invalid for server->client
	// TODO: add console message for client
	// TODO: add client name to message
	if (uncompressedSize > 1024 * 64) {
		Con_Printf("Received too large file (size=%u)\nFlushing input queue\n", uncompressedSize);
		Netchan_FlushIncoming(chan, 1);
		return FALSE;
	}
#endif

	// reject empty, relative, unsafe, or (on dedicated) disk-bound names
	if (Q_strlen(filename) <= 0)
	{
		Con_Printf("File fragment received with no filename\nFlushing input queue\n");
		Netchan_FlushIncoming(chan, 1);
		return FALSE;
	}

	if (Q_strstr(filename, ".."))
	{
		Con_Printf("File fragment received with relative path, ignoring\n");
		Netchan_FlushIncoming(chan, 1);
		return FALSE;
	}

	if (filename[0] != '!' && !IsSafeFileToDownload(filename))
	{
		Con_Printf("File fragment received with bad path, ignoring\n");
		Netchan_FlushIncoming(chan, 1);
		return FALSE;
	}

	// This prohibits to write files to FS on server
	if (g_pcls.state == ca_dedicated && filename[0] != '!')
	{
		Con_Printf("File fragment received with bad path, ignoring (2)\n");
		Netchan_FlushIncoming(chan, 1);
		return FALSE;
	}

	Q_strncpy(chan->incomingfilename, filename, MAX_PATH - 1);
	chan->incomingfilename[MAX_PATH - 1] = 0;

	if (filename[0] != '!' && FS_FileExists(filename))
	{
		Con_Printf("Can't download %s, already exists\n", filename);
		Netchan_FlushIncoming(chan, 1);
		return TRUE;
	}

	// total payload size, excluding the header consumed from fragment 1
	nsize = 0;
	while (p)
	{
		nsize += p->frag_message.cursize;
		if (p == chan->incomingbufs[FRAG_FILE_STREAM])
			nsize -= msg_readcount;
		p = p->next;
	}

	buffer = (unsigned char*)Mem_ZeroMalloc(nsize + 1);
	if (!buffer)
	{
		Con_Printf("Buffer allocation failed on %i bytes\n", nsize + 1);
		Netchan_FlushIncoming(chan, 1);
		return FALSE;
	}

	// concatenate fragment payloads, freeing each fragment as we go
	p = chan->incomingbufs[FRAG_FILE_STREAM];
	pos = 0;
	while (p)
	{
		n = p->next;

		cursize = p->frag_message.cursize;
		// First message has the file name, don't write that into the data stream, just write the rest of the actual data
		if (p == chan->incomingbufs[FRAG_FILE_STREAM])
		{
			// Copy it in
			cursize -= msg_readcount;
			Q_memcpy(&buffer[pos], &p->frag_message.data[msg_readcount], cursize);
			p->frag_message.cursize = cursize;
		}
		else
		{
			Q_memcpy(&buffer[pos], p->frag_message.data, cursize);
		}

		pos += p->frag_message.cursize;
		Mem_Free(p);
		p = n;
	}

	if (bCompressed)
	{
		// NOTE(review): neither this allocation nor the decompress return
		// code is checked; a corrupt stream leaves `buffer` with
		// undefined contents -- confirm and harden if touched.
		unsigned char* uncompressedBuffer = (unsigned char*)Mem_Malloc(uncompressedSize);

		Con_DPrintf("Decompressing file %s (%d -> %d)\n", filename, nsize, uncompressedSize);
		BZ2_bzBuffToBuffDecompress((char*)uncompressedBuffer, &uncompressedSize, (char*)buffer, nsize, 1, 0);
		Mem_Free(buffer);
		pos = uncompressedSize;
		buffer = uncompressedBuffer;
	}

	if (filename[0] == '!')
	{
		// in-memory resource: hand ownership of `buffer` to the channel
		if (chan->tempbuffer)
		{
			Con_DPrintf("Netchan_CopyFragments:  Freeing holdover tempbuffer\n");
			Mem_Free(chan->tempbuffer);
		}
		chan->tempbuffer = buffer;
		chan->tempbuffersize = pos;
	}
	else
	{
		char filedir[MAX_PATH];
		char *pszFileName;
		FileHandle_t handle;

#ifdef REHLDS_CHECKS
		Q_strncpy(filedir, filename, sizeof(filedir) - 1);
		filedir[sizeof(filedir) - 1] = 0;
#else
		Q_strncpy(filedir, filename, sizeof(filedir));
#endif // REHLDS_CHECKS
		COM_FixSlashes(filedir);
		pszFileName = Q_strrchr(filedir, '\\');
		if (pszFileName)
		{
			// strip the file name, leaving only the directory portion
			*pszFileName = 0;

#ifdef REHLDS_FIXES
			FS_CreateDirHierarchy(filedir, "GAMEDOWNLOAD");
#endif
		}

#ifndef REHLDS_FIXES
		FS_CreateDirHierarchy(filedir, "GAMEDOWNLOAD");
#endif
		handle = FS_OpenPathID(filename, "wb", "GAMEDOWNLOAD");
		if (!handle)
		{
			Con_Printf("File open failed %s\n", filename);
			Netchan_FlushIncoming(chan, 1);

#ifdef REHLDS_FIXES
			Mem_Free(buffer);
#endif
			return FALSE;
		}

		Sys_Printf("COM_WriteFile: %s\n", filename);
		FS_Write(buffer, pos, 1, handle);
		FS_Close(handle);

		Mem_Free(buffer);
	}

	SZ_Clear(&net_message);
	chan->incomingbufs[FRAG_FILE_STREAM] = nullptr;
	chan->incomingready[FRAG_FILE_STREAM] = FALSE;
	msg_readcount = 0;
	return TRUE;
}
// Splits a file on disk into netchan fragments queued on the channel's
// FRAG_FILE_STREAM waitlist. If an up-to-date "<filename>.ztmp" bz2 copy
// exists (or can be created when sv_filetransfercompression is set), the
// compressed bytes are what get fragmented; the first fragment carries the
// filename, a "bz2"/"uncompressed" tag, and the uncompressed size so the
// receiver can rebuild the original file.
// Returns 1 on success, 0 if the file can't be opened or exceeds
// sv_filetransfermaxsize.
int Netchan_CreateFileFragments_(qboolean server, netchan_t *chan, const char *filename)
#endif // REHLDS_FIXES
{
	int chunksize;
	int compressedFileTime;
	FileHandle_t hfile;
	signed int filesize;
	int remaining;
	fragbufwaiting_t *p;
	int send;
	fragbuf_t *buf;
	char compressedfilename[MAX_PATH];
	qboolean firstfragment;
	int bufferid;
	qboolean bCompressed;
	int pos;
	fragbufwaiting_t *wait;
	int uncompressed_size;

	bufferid = 1;
	firstfragment = TRUE;
	bCompressed = FALSE;
	// Per-fragment payload limit depends on the connection state.
	chunksize = chan->pfnNetchan_Blocksize(chan->connection_status);

	Q_snprintf(compressedfilename, sizeof compressedfilename, "%s.ztmp", filename);
	compressedFileTime = FS_GetFileTime(compressedfilename);
	if (compressedFileTime >= FS_GetFileTime(filename) && (hfile = FS_Open(compressedfilename, "rb")))
	{
		// A cached compressed copy exists and is at least as new as the source:
		// transfer the .ztmp bytes (filesize = compressed size).
		filesize = FS_Size(hfile);
		FS_Close(hfile);
		bCompressed = TRUE;
		// Re-open the original only to learn its uncompressed size and to
		// apply the size limit against the real file.
		hfile = FS_Open(filename, "rb");
		if (!hfile)
		{
			Con_Printf("Warning: Unable to open %s for transfer\n", filename);
			return 0;
		}
		uncompressed_size = FS_Size(hfile);
		if (uncompressed_size > sv_filetransfermaxsize.value)
		{
			FS_Close(hfile);
			Con_Printf("Warning: File %s is too big to transfer from host %s\n", filename, NET_AdrToString(chan->remote_address));
			return 0;
		}
	}
	else
	{
		hfile = FS_Open(filename, "rb");
		if (!hfile)
		{
			Con_Printf("Warning: Unable to open %s for transfer\n", filename);
			return 0;
		}
		filesize = FS_Size(hfile);
		if (filesize > sv_filetransfermaxsize.value)
		{
			FS_Close(hfile);
			Con_Printf("Warning: File %s is too big to transfer from host %s\n", filename, NET_AdrToString(chan->remote_address));
			return 0;
		}
		uncompressed_size = filesize;
		if (sv_filetransfercompression.value != 0.0)
		{
			// Try to build the .ztmp cache now. On incompressible data bz2
			// returns non-BZ_OK (dest buffer is only filesize bytes) and we
			// silently fall back to the uncompressed transfer.
			unsigned char* uncompressed = (unsigned char*)Mem_Malloc(filesize);
			unsigned char* compressed = (unsigned char*)Mem_Malloc(filesize);
			unsigned int compressedSize = filesize;
			FS_Read(uncompressed, filesize, 1, hfile);
			if (BZ_OK == BZ2_bzBuffToBuffCompress((char*)compressed, &compressedSize, (char*)uncompressed, filesize, 9, 0, 30))
			{
				FileHandle_t destFile = FS_Open(compressedfilename, "wb");
				if (destFile)
				{
					Con_DPrintf("Creating compressed version of file %s (%d -> %d)\n", filename, filesize, compressedSize);
					FS_Write(compressed, compressedSize, 1, destFile);
					FS_Close(destFile);
					filesize = compressedSize;
					bCompressed = TRUE;
				}
			}
			Mem_Free(uncompressed);
			Mem_Free(compressed);
		}
	}
	FS_Close(hfile);

	// 0xC == sizeof(fragbufwaiting_t) in the original layout (decompiled
	// constant kept as-is) — TODO confirm against the struct definition.
	wait = (fragbufwaiting_t *)Mem_ZeroMalloc(0xCu);
	remaining = filesize;
	pos = 0;

	while (remaining)
	{
		send = min(chunksize, remaining);
		buf = Netchan_AllocFragbuf();
		if (!buf)
		{
			Con_Printf("Couldn't allocate fragbuf_t\n");
			Mem_Free(wait);
			if (server)
			{
#ifdef REHLDS_FIXES
				SV_DropClient(&g_psvs.clients[chan->player_slot - 1], 0, "Malloc problem");
#else // REHLDS_FIXES
				SV_DropClient(host_client, 0, "Malloc problem");
#endif // REHLDS_FIXES
				return 0;
			}
			else
			{
				rehlds_syserror("%s: Reverse clientside code", __func__);
				//return 0;
			}
		}
		buf->bufferid = bufferid++;
		SZ_Clear(&buf->frag_message);
		if (firstfragment)
		{
			// First fragment carries the transfer header; shrink its file
			// payload so the fragment still fits in chunksize.
			firstfragment = FALSE;
			MSG_WriteString(&buf->frag_message, filename);
			MSG_WriteString(&buf->frag_message, bCompressed ? "bz2" : "uncompressed");
			MSG_WriteLong(&buf->frag_message, uncompressed_size);
			send -= buf->frag_message.cursize;
		}
		// File data is not copied here; the send path later reads `size`
		// bytes at `foffset` (presumably from the .ztmp when iscompressed —
		// confirm in the fragment-send code).
		buf->isfile = TRUE;
		buf->iscompressed = bCompressed;
		buf->size = send;
		buf->foffset = pos;

		Q_strncpy(buf->filename, filename, MAX_PATH - 1);
		buf->filename[MAX_PATH - 1] = 0;

		pos += send;
		remaining -= send;

		Netchan_AddFragbufToTail(wait, buf);
	}

	// Append this transfer to the end of the channel's file-stream waitlist.
	if (!chan->waitlist[FRAG_FILE_STREAM])
	{
		chan->waitlist[FRAG_FILE_STREAM] = wait;
	}
	else
	{
		p = chan->waitlist[FRAG_FILE_STREAM];
		while (p->next)
			p = p->next;
		p->next = wait;
	}

	return 1;
}
// Queues an in-memory buffer for transfer over the channel's file stream,
// bz2-compressing it first when that actually shrinks it. Unlike the on-disk
// variant, every fragment's payload is copied into the fragbuf here
// (isbuffer = TRUE), so nothing has to be re-read at send time.
void Netchan_CreateFileFragmentsFromBuffer(qboolean server, netchan_t *chan, const char *filename, unsigned char *uncompressed_pbuf, int uncompressed_size)
{
	int chunksize;
	int send;
	fragbufwaiting_t *p;
	fragbuf_t *buf;
	unsigned char *pbuf;
	qboolean bCompressed;
	qboolean firstfragment;
	signed int bufferid;
	int remaining;
	int pos;
	unsigned int size;
	fragbufwaiting_t *wait;

	// Nothing to send.
	if (!uncompressed_size)
		return;

	bufferid = 1;
	firstfragment = TRUE;
	size = uncompressed_size;

	// Compress into a scratch buffer of the same size; if the data does not
	// compress (bz2 returns non-zero, e.g. BZ_OUTBUFF_FULL), fall back to
	// sending the caller's buffer as-is.
	pbuf = (unsigned char *)Mem_Malloc(uncompressed_size);
	if (BZ2_bzBuffToBuffCompress((char*)pbuf, &size, (char*)uncompressed_pbuf, uncompressed_size, 9, 0, 30))
	{
		bCompressed = FALSE;
		Mem_Free(pbuf);
		pbuf = uncompressed_pbuf;
		size = uncompressed_size;
	}
	else
	{
		bCompressed = TRUE;
		Con_DPrintf("Compressed %s for transmission (%d -> %d)\n", filename, uncompressed_size, size);
	}

	chunksize = chan->pfnNetchan_Blocksize(chan->connection_status);
	send = chunksize;
	// 0xC == sizeof(fragbufwaiting_t) (decompiled constant kept as-is).
	wait = (fragbufwaiting_t *)Mem_ZeroMalloc(0xCu);
	remaining = size;
	pos = 0;

	while (remaining > 0)
	{
		send = min(remaining, chunksize);
		buf = (fragbuf_t *)Netchan_AllocFragbuf();
		if (!buf)
		{
			Con_Printf("Couldn't allocate fragbuf_t\n");
			Mem_Free(wait);
			if (server)
				SV_DropClient(host_client, 0, "Malloc problem");
			else
				rehlds_syserror("%s:Reverse me: client-side code", __func__);
#ifdef REHLDS_FIXES
			// Without this the compressed scratch buffer leaks on the
			// server drop path (pre-fix engine behavior).
			if (bCompressed)
			{
				Mem_Free(pbuf);
			}
#endif
			return;
		}

		buf->bufferid = bufferid++;
		SZ_Clear(&buf->frag_message);
		if (firstfragment)
		{
			// Transfer header: name, compression tag, uncompressed size.
			// Shrink the first fragment's data payload to compensate.
			firstfragment = FALSE;
			MSG_WriteString(&buf->frag_message, filename);
			MSG_WriteString(&buf->frag_message, bCompressed ? "bz2" : "uncompressed");
			MSG_WriteLong(&buf->frag_message, uncompressed_size);
			send -= buf->frag_message.cursize;
		}
		buf->isbuffer = TRUE;
		buf->isfile = TRUE;
		buf->size = send;
		buf->foffset = pos;

		MSG_WriteBuf(&buf->frag_message, send, &pbuf[pos]);
		pos += send;
		remaining -= send;

		Netchan_AddFragbufToTail(wait, buf);
	}

	// Append to the tail of any transfers already waiting on this stream.
	if (!chan->waitlist[FRAG_FILE_STREAM])
	{
		chan->waitlist[FRAG_FILE_STREAM] = wait;
	}
	else
	{
		p = chan->waitlist[FRAG_FILE_STREAM];
		while (p->next)
			p = p->next;
		p->next = wait;
	}

#ifdef REHLDS_FIXES
	// Fragments hold their own copies; the compressed scratch buffer can go.
	if (bCompressed)
	{
		Mem_Free(pbuf);
	}
#endif
}
/* <2a3cb> ../engine/hashpak.c:1235 */
// Console command "hpkval <hpkname>": walks every directory entry of an HPAK
// archive, re-hashes each lump's stored bytes with MD5, and reports whether
// the hash matches the one recorded in the entry's resource descriptor.
void HPAK_Validate_f(void)
{
	hash_pack_header_t header;
	hash_pack_directory_t directory;
	hash_pack_entry_t *entry;
	char name[MAX_PATH];
	char szFileName[MAX_PATH];
	char type[32];
	FileHandle_t fp;
	byte *pData;
	int nDataSize;
	byte md5[16];
	MD5Context_t ctx;

	// Only usable from the local console.
	if (cmd_source != src_command)
		return;

	// Make sure no queued HPAK writes are pending before reading the file.
	HPAK_FlushHostQueue();

	if (Cmd_Argc() != 2)
	{
		Con_Printf("Usage: hpkval hpkname\n");
		return;
	}

	Q_snprintf(name, ARRAYSIZE(name), "%s", Cmd_Argv(1));
#ifdef REHLDS_FIXES
	name[ARRAYSIZE(name) - 1] = 0;
#endif // REHLDS_FIXES
	COM_DefaultExtension(name, HASHPAK_EXTENSION);
	Con_Printf("Validating %s.\n", name);

	fp = FS_Open(name, "rb");
	if (!fp)
	{
		Con_Printf("ERROR: couldn't open %s.\n", name);
		return;
	}
	// Header sanity: magic stamp then format version.
	FS_Read(&header, sizeof(hash_pack_header_t), 1, fp);
	if (Q_strncmp(header.szFileStamp, "HPAK", sizeof(header.szFileStamp)))
	{
		Con_Printf("%s is not an HPAK file\n", name);
		FS_Close(fp);
		return;
	}
	if (header.version != HASHPAK_VERSION)
	{
		Con_Printf("hpkval: version mismatch\n");
		FS_Close(fp);
		return;
	}
	// Directory: an entry count followed by the packed entry array.
	FS_Seek(fp, header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD);
	FS_Read(&directory.nEntries, 4, 1, fp);
	if (directory.nEntries < 1 || (unsigned int)directory.nEntries > MAX_FILE_ENTRIES)
	{
		Con_Printf("ERROR: HPAK had bogus # of directory entries: %i\n", directory.nEntries);
		FS_Close(fp);
		return;
	}
	Con_Printf("# of Entries: %i\n", directory.nEntries);
	Con_Printf("# Type Size FileName : MD5 Hash\n");
	directory.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * directory.nEntries);
	FS_Read(directory.p_rgEntries, sizeof(hash_pack_entry_t) * directory.nEntries, 1, fp);
	for (int nCurrent = 0; nCurrent < directory.nEntries; nCurrent++)
	{
		entry = &directory.p_rgEntries[nCurrent];
		COM_FileBase(entry->resource.szFileName, szFileName);
		// Human-readable label for the resource type.
		switch (entry->resource.type)
		{
		case t_sound:
			Q_strcpy(type, "sound");
			break;
		case t_skin:
			Q_strcpy(type, "skin");
			break;
		case t_model:
			Q_strcpy(type, "model");
			break;
		case t_decal:
			Q_strcpy(type, "decal");
			break;
		case t_generic:
			Q_strcpy(type, "generic");
			break;
		case t_eventscript:
			Q_strcpy(type, "event");
			break;
		default:
			Q_strcpy(type, "?");
			break;
		}
		Con_Printf("%i: %10s %.2fK %s: ", nCurrent + 1, type, entry->resource.nDownloadSize / 1024.0f, szFileName);
		nDataSize = entry->nFileLength;
		if (nDataSize < 1 || (unsigned int)nDataSize >= MAX_FILE_SIZE)
			Con_Printf("Unable to MD5 hash data, size invalid: %i\n", nDataSize);
		else
		{
			// Read the lump payload and hash it.
			pData = (byte *)Mem_Malloc(nDataSize + 1);
			Q_memset(pData, 0, nDataSize);
			FS_Seek(fp, entry->nOffset, FILESYSTEM_SEEK_HEAD);
			FS_Read(pData, nDataSize, 1, fp);
			Q_memset(&ctx, 0, sizeof(MD5Context_t));
			MD5Init(&ctx);
			MD5Update(&ctx, pData, nDataSize);
			MD5Final(md5, &ctx);
			if (Q_memcmp(entry->resource.rgucMD5_hash, md5, sizeof(md5)) == 0)
				Con_Printf(" OK\n");
			else
			{
				Con_Printf(" MISMATCHED\n");
				Con_Printf("--------------------\n");
				Con_Printf(" File : %s\n", MD5_Print(entry->resource.rgucMD5_hash));
				Con_Printf(" Actual: %s\n", MD5_Print(md5));
				Con_Printf("--------------------\n");
			}
			if (pData)
				Mem_Free(pData);
		}
	}
	FS_Close(fp);
	Mem_Free(directory.p_rgEntries);
}
NOXREF void TextMessageParse(unsigned char *pMemFile, int fileSize) { NOXREFCHECK; char buf[512]; char trim[512]; char *pCurrentText; char *pNameHeap; char currentName[512]; char nameHeap[NAME_HEAP_SIZE]; int lastNamePos; int mode; int lineNumber; int filePos; int lastLinePos; int messageCount; client_textmessage_t textMessages[MAX_MESSAGES]; int i; int nameHeapSize; int textHeapSize; int messageSize; int nameOffset; lastNamePos = 0; lineNumber = 0; filePos = 0; lastLinePos = 0; messageCount = 0; mode = MSGFILE_NAME; while (memfgets(pMemFile, fileSize, &filePos, buf, 512) != NULL) { if(messageCount >= MAX_MESSAGES) Sys_Error("%s: messageCount >= MAX_MESSAGES", __func__); TrimSpace(buf, trim); switch (mode) { case MSGFILE_NAME: { if (IsComment(trim)) break; if (ParseDirective(trim)) break; if (IsStartOfText(trim)) { mode = MSGFILE_TEXT; pCurrentText = (char *)(pMemFile + filePos); break; } if (IsEndOfText(trim)) { Con_DPrintf("Unexpected '}' found, line %d\n", lineNumber); return; } Q_strncpy(currentName, trim, 511); currentName[511] = 0; break; } case MSGFILE_TEXT: { if (IsEndOfText(trim)) { int length = Q_strlen(currentName); if (lastNamePos + length > sizeof(nameHeap)) { Con_DPrintf("Error parsing file! 
length > %i bytes\n", sizeof(nameHeap)); return; } Q_strcpy(nameHeap + lastNamePos, currentName); pMemFile[lastLinePos - 1] = 0; textMessages[messageCount] = gMessageParms; textMessages[messageCount].pName = nameHeap + lastNamePos; lastNamePos += Q_strlen(currentName) + 1; textMessages[messageCount].pMessage = pCurrentText; messageCount++; mode = MSGFILE_NAME; break; } if (IsStartOfText(trim)) { Con_DPrintf("Unexpected '{' found, line %d\n", lineNumber); return; } break; } } lineNumber++; lastLinePos = filePos; } Con_DPrintf("Parsed %d text messages\n", messageCount); nameHeapSize = lastNamePos; textHeapSize = 0; for (i = 0; i < messageCount; i++) textHeapSize += Q_strlen(textMessages[i].pMessage) + 1; messageSize = (messageCount * sizeof(client_textmessage_t)); gMessageTable = (client_textmessage_t *)Mem_Malloc(textHeapSize + nameHeapSize + messageSize); Q_memcpy(gMessageTable, textMessages, messageSize); pNameHeap = ((char *)gMessageTable) + messageSize; Q_memcpy(pNameHeap, nameHeap, nameHeapSize); nameOffset = pNameHeap - gMessageTable[0].pName; pCurrentText = pNameHeap + nameHeapSize; for (i = 0; i < messageCount; i++) { gMessageTable[i].pName += nameOffset; Q_strcpy(pCurrentText, gMessageTable[i].pMessage); gMessageTable[i].pMessage = pCurrentText; pCurrentText += Q_strlen(pCurrentText) + 1; } gMessageTableCount = messageCount; }
/* <2a644> ../engine/hashpak.c:945 */
// Console command "hpklist <hpkname>": prints every directory entry of an
// HPAK archive — index, resource type, size, base filename and stored MD5.
// Read-only; does not verify the hashes (see HPAK_Validate_f for that).
void HPAK_List_f(void)
{
	hash_pack_header_t header;
	hash_pack_directory_t directory;
	hash_pack_entry_t *entry;
	char name[MAX_PATH];
	char szFileName[MAX_PATH];
	char type[32];
	FileHandle_t fp;

	// Only usable from the local console.
	if (cmd_source != src_command)
		return;

	// Flush pending HPAK writes before reading the archive.
	HPAK_FlushHostQueue();

	Q_snprintf(name, ARRAYSIZE(name), "%s", Cmd_Argv(1));
#ifdef REHLDS_FIXES
	name[ARRAYSIZE(name) - 1] = 0;
#endif // REHLDS_FIXES
	COM_DefaultExtension(name, HASHPAK_EXTENSION);
	Con_Printf("Contents for %s.\n", name);

	fp = FS_Open(name, "rb");
	if (!fp)
	{
		Con_Printf("ERROR: couldn't open %s.\n", name);
		return;
	}
	// Header sanity: magic stamp then format version.
	FS_Read(&header, sizeof(hash_pack_header_t), 1, fp);
	if (Q_strncmp(header.szFileStamp, "HPAK", sizeof(header.szFileStamp)))
	{
		Con_Printf("%s is not an HPAK file\n", name);
		FS_Close(fp);
		return;
	}
	if (header.version != HASHPAK_VERSION)
	{
		Con_Printf("HPAK_List: version mismatch\n");
		FS_Close(fp);
		return;
	}
	// Directory: entry count followed by the packed entry array.
	FS_Seek(fp, header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD);
	FS_Read(&directory.nEntries, 4, 1, fp);
	if (directory.nEntries < 1 || (unsigned int)directory.nEntries > MAX_FILE_ENTRIES)
	{
		Con_Printf("ERROR: HPAK had bogus # of directory entries: %i\n", directory.nEntries);
		FS_Close(fp);
		return;
	}
	Con_Printf("# of Entries: %i\n", directory.nEntries);
	Con_Printf("# Type Size FileName : MD5 Hash\n");
	directory.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * directory.nEntries);
	FS_Read(directory.p_rgEntries, sizeof(hash_pack_entry_t) * directory.nEntries, 1, fp);
	for (int nCurrent = 0; nCurrent < directory.nEntries; nCurrent++)
	{
		entry = &directory.p_rgEntries[nCurrent];
		COM_FileBase(entry->resource.szFileName, szFileName);
		// Human-readable label for the resource type.
		switch (entry->resource.type)
		{
		case t_sound:
			Q_strcpy(type, "sound");
			break;
		case t_skin:
			Q_strcpy(type, "skin");
			break;
		case t_model:
			Q_strcpy(type, "model");
			break;
		case t_decal:
			Q_strcpy(type, "decal");
			break;
		case t_generic:
			Q_strcpy(type, "generic");
			break;
		case t_eventscript:
			Q_strcpy(type, "event");
			break;
		default:
			Q_strcpy(type, "?");
			break;
		}
		Con_Printf("%i: %10s %.2fK %s\n : %s\n", nCurrent + 1, type, entry->resource.nDownloadSize / 1024.0f, szFileName, MD5_Print(entry->resource.rgucMD5_hash));
	}
	FS_Close(fp);
	Mem_Free(directory.p_rgEntries);
}
/* <2a121> ../engine/hashpak.c:1060 */
// Creates a brand-new HPAK archive containing exactly one lump.
// Exactly one data source must be provided: pData (lump already in memory)
// or fpSource (an open handle positioned at the lump; its position is
// restored afterwards). The lump's bytes are MD5-hashed and must match
// pResource->rgucMD5_hash, otherwise the archive is abandoned.
void HPAK_CreatePak(char *pakname, struct resource_s *pResource, void *pData, FileHandle_t fpSource)
{
	char name[MAX_PATH];
	int32 curpos;
	FileHandle_t fp;
	hash_pack_entry_t *pCurrentEntry;
	byte md5[16];
	MD5Context_t ctx;
	byte *pDiskData;

	// Exactly one of the two data sources is allowed.
	if ((!fpSource && !pData) || (fpSource && pData))
	{
		Con_Printf("HPAK_CreatePak, must specify one of pData or fpSource\n");
		return;
	}

	Q_snprintf(name, ARRAYSIZE(name), "%s", pakname);
#ifdef REHLDS_FIXES
	name[ARRAYSIZE(name) - 1] = 0;
#endif // REHLDS_FIXES
	COM_DefaultExtension(name, HASHPAK_EXTENSION);
	Con_Printf("Creating HPAK %s.\n", name);

	fp = FS_Open(name, "wb");
	if (!fp)
	{
		Con_Printf("ERROR: couldn't open new .hpk, check access rights to %s.\n", name);
		return;
	}

	// Hash the payload so we can verify it against the resource's stated MD5.
	Q_memset(&ctx, 0, sizeof(MD5Context_t));
	MD5Init(&ctx);
	if (pData)
		MD5Update(&ctx, (byte *)pData, pResource->nDownloadSize);
	else
	{
		// Read the lump from the source handle, then restore its position.
		curpos = FS_Tell(fpSource);
		pDiskData = (byte *)Mem_Malloc(pResource->nDownloadSize + 1);
		Q_memset(pDiskData, 0, pResource->nDownloadSize);
		// BUGFIX: read from fpSource (the lump's file). The original code
		// read from fp — the output archive that was just opened "wb" — so
		// the MD5 was computed over garbage and the check below always failed.
		FS_Read(pDiskData, pResource->nDownloadSize, 1, fpSource);
		FS_Seek(fpSource, curpos, FILESYSTEM_SEEK_HEAD);
		MD5Update(&ctx, pDiskData, pResource->nDownloadSize);
		Mem_Free(pDiskData);
	}
	MD5Final(md5, &ctx);

	if (Q_memcmp(pResource->rgucMD5_hash, md5, sizeof(md5)) != 0)
	{
		Con_Printf("HPAK_CreatePak called with bogus lump, md5 mismatch\n");
		Con_Printf("Purported: %s\n", MD5_Print(pResource->rgucMD5_hash));
		Con_Printf("Actual : %s\n", MD5_Print(md5));
		Con_Printf("Ignoring lump addition\n");
		// BUGFIX: close the output handle before bailing out; the original
		// returned here and leaked fp (and left the truncated file locked).
		FS_Close(fp);
		return;
	}

	// Write a placeholder header first; the real directory offset is patched
	// in at the end once we know where the directory landed.
	Q_memset(&hash_pack_header, 0, sizeof(hash_pack_header_t));
	Q_memcpy(hash_pack_header.szFileStamp, "HPAK", sizeof(hash_pack_header.szFileStamp));
	hash_pack_header.version = HASHPAK_VERSION;
	hash_pack_header.nDirectoryOffset = 0;
	FS_Write(&hash_pack_header, sizeof(hash_pack_header_t), 1, fp);

	// Single-entry directory describing the lump we are about to write.
	Q_memset(&hash_pack_dir, 0, sizeof(hash_pack_directory_t));
	hash_pack_dir.nEntries = 1;
	hash_pack_dir.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t));
	Q_memset(hash_pack_dir.p_rgEntries, 0, sizeof(hash_pack_entry_t) * hash_pack_dir.nEntries);
	pCurrentEntry = &hash_pack_dir.p_rgEntries[0];
	Q_memcpy(&pCurrentEntry->resource, pResource, sizeof(resource_t));
	pCurrentEntry->nOffset = FS_Tell(fp);
	pCurrentEntry->nFileLength = pResource->nDownloadSize;

	// Lump payload, straight after the header.
	if (pData)
		FS_Write(pData, pResource->nDownloadSize, 1, fp);
	else
		COM_CopyFileChunk(fp, fpSource, pResource->nDownloadSize);

	// Directory: entry count, then the entry array.
	curpos = FS_Tell(fp);
	FS_Write(&hash_pack_dir.nEntries, 4, 1, fp);
	FS_Write(hash_pack_dir.p_rgEntries, sizeof(hash_pack_entry_t), 1, fp);
	if (hash_pack_dir.p_rgEntries)
	{
		Mem_Free(hash_pack_dir.p_rgEntries);
		hash_pack_dir.p_rgEntries = NULL;
	}
	hash_pack_dir.nEntries = 0;

	// Patch the header with the directory's actual offset.
	hash_pack_header.nDirectoryOffset = curpos;
	FS_Seek(fp, 0, FILESYSTEM_SEEK_HEAD);
	FS_Write(&hash_pack_header, sizeof(hash_pack_header_t), 1, fp);
	FS_Close(fp);
}
/* <2a33c> ../engine/hashpak.c:1401 */ void HPAK_Extract_f(void) { hash_pack_header_t header; hash_pack_directory_t directory; hash_pack_entry_t *entry; char name[MAX_PATH]; char type[32]; FileHandle_t fp; int nIndex; byte *pData; int nDataSize; FileHandle_t fpOutput; char szFileOut[MAX_PATH]; if (cmd_source != src_command) return; HPAK_FlushHostQueue(); if (Cmd_Argc() != 3) { Con_Printf("Usage: hpkextract hpkname [all | single index]\n"); return; } if (Q_stricmp(Cmd_Argv(2),"all") != 0) { nIndex = Q_atoi(Cmd_Argv(2)); #ifdef REHLDS_FIXES Q_snprintf(name, sizeof(name), "%s", Cmd_Argv(1)); #else Q_snprintf(name, 256, "%s", Cmd_Argv(1)); #endif // REHLDS_FIXES if (nIndex != -1) Con_Printf("Extracting lump %i from %s\n", nIndex, name); } else { nIndex = -1; Q_snprintf(name, ARRAYSIZE(name), "%s", Cmd_Argv(1)); #ifdef REHLDS_FIXES name[ARRAYSIZE(name) - 1] = 0; #endif // REHLDS_FIXES COM_DefaultExtension(name, HASHPAK_EXTENSION); Con_Printf("Extracting all lumps from %s.\n", name); } fp = FS_Open(name, "rb"); if (!fp) { Con_Printf("ERROR: couldn't open %s.\n", name); return; } FS_Read(&header, sizeof(hash_pack_header_t), 1, fp); if (Q_strncmp(header.szFileStamp, "HPAK", sizeof(header.szFileStamp))) { Con_Printf("%s is not an HPAK file\n", name); FS_Close(fp); return; } if (header.version != HASHPAK_VERSION) { Con_Printf("hpkextract: version mismatch\n"); FS_Close(fp); return; } FS_Seek(fp, header.nDirectoryOffset, FILESYSTEM_SEEK_HEAD); FS_Read(&directory.nEntries, 4, 1, fp); if (directory.nEntries < 1 || (unsigned int)directory.nEntries > MAX_FILE_ENTRIES) { Con_Printf("ERROR: HPAK had bogus # of directory entries: %i\n", directory.nEntries); FS_Close(fp); return; } Con_Printf("# of Entries: %i\n", directory.nEntries); Con_Printf("# Type Size FileName : MD5 Hash\n"); directory.p_rgEntries = (hash_pack_entry_t *)Mem_Malloc(sizeof(hash_pack_entry_t) * directory.nEntries); FS_Read(directory.p_rgEntries, sizeof(hash_pack_entry_t) * directory.nEntries, 1, fp); for (int 
nCurrent = 0; nCurrent < directory.nEntries; nCurrent++) { entry = &directory.p_rgEntries[nCurrent]; if (nIndex == -1 || nIndex == nCurrent) { COM_FileBase(entry->resource.szFileName, szFileOut); switch (entry->resource.type) { case t_sound: Q_strcpy(type, "sound"); break; case t_skin: Q_strcpy(type, "skin"); break; case t_model: Q_strcpy(type, "model"); break; case t_decal: Q_strcpy(type, "decal"); break; case t_generic: Q_strcpy(type, "generic"); break; case t_eventscript: Q_strcpy(type, "event"); break; default: Q_strcpy(type, "?"); break; } Con_Printf("Extracting %i: %10s %.2fK %s\n", nCurrent, type, entry->resource.nDownloadSize / 1024.0f, szFileOut); nDataSize = entry->nFileLength; if (nDataSize < 1 || (unsigned int)nDataSize >= MAX_FILE_SIZE) Con_Printf("Unable to extract data, size invalid: %s\n", nDataSize); else { pData = (byte *)Mem_Malloc(nDataSize + 1); Q_memset(pData, 0, nDataSize); FS_Seek(fp, entry->nOffset, FILESYSTEM_SEEK_HEAD); FS_Read(pData, nDataSize, 1, fp); Q_snprintf(szFileOut, sizeof(szFileOut), "hpklmps\\lmp%04i.wad", nCurrent); COM_FixSlashes(szFileOut); COM_CreatePath(szFileOut); fpOutput = FS_Open(szFileOut, "wb"); if (fpOutput) { FS_Write(pData, nDataSize, 1, fpOutput); FS_Close(fpOutput); } else Con_Printf("Error creating lump file %s\n", szFileOut); if (pData) Mem_Free(pData); } } } FS_Close(fp); Mem_Free(directory.p_rgEntries); }
/* <5e43> ../engine/cmd.c:347 */
// Console command "exec <filename>": loads a .cfg/.rc script and feeds it to
// the command buffer. The filename is sanitized (no path traversal), looked
// up through the GAMECONFIG -> GAME -> default search paths in that order,
// and either inserted wholesale into cmd_text or, if it would overflow the
// buffer, drained line-by-line while executing as it goes.
void Cmd_Exec_f(void)
{
	const char *pszFileName;
	const char *pszFileExt;
	char *pszFileData;
	int nAddLen;
	FileHandle_t hFile;

	if (Cmd_Argc() != 2)
	{
		Con_Printf("exec <filename> : execute a script file\n");
		return;
	}
	pszFileName = Cmd_Argv(1);
	if (!pszFileName || pszFileName[0] == 0)
	{
		return;
	}
	// Reject anything that could escape the game directory (backslashes,
	// drive letters, home expansion, parent refs, absolute paths).
	if (Q_strstr(pszFileName, "\\") || Q_strstr(pszFileName, ":") || Q_strstr(pszFileName, "~") || Q_strstr(pszFileName, "..") || *pszFileName == '/')
	{
		Con_Printf("exec %s: invalid path.\n", pszFileName);
		return;
	}
	// Only configuration scripts may be exec'd.
	pszFileExt = COM_FileExtension((char *)pszFileName);
	if (Q_stricmp(pszFileExt, "cfg") && Q_stricmp(pszFileExt, "rc"))
	{
		Con_Printf("exec %s: not a .cfg or .rc file\n", pszFileName);
		return;
	}
	// Search-path fallback: GAMECONFIG first, then GAME, then the default
	// search order. The order matters — keep it.
	hFile = FS_OpenPathID(pszFileName, "rb", "GAMECONFIG");
	if (!hFile)
	{
		hFile = FS_OpenPathID(pszFileName, "rb", "GAME");
	}
	if (!hFile)
	{
		hFile = FS_Open(pszFileName, "rb");
	}
	if (!hFile)
	{
		// These well-known configs are optional; stay quiet when missing.
		if (!Q_strstr(pszFileName, "autoexec.cfg")
			&& !Q_strstr(pszFileName, "userconfig.cfg")
			&& !Q_strstr(pszFileName, "hw/opengl.cfg")
			&& !Q_strstr(pszFileName, "joystick.cfg")
			&& !Q_strstr(pszFileName, "game.cfg"))
		{
			Con_Printf("couldn't exec %s\n", pszFileName);
		}
		return;
	}
	// Slurp the whole script and NUL-terminate it.
	nAddLen = FS_Size(hFile);
	pszFileData = (char *)Mem_Malloc(nAddLen + 1);
	if (!pszFileData)
	{
		Con_Printf("exec: not enough space for %s", pszFileName);
		FS_Close(hFile);
		return;
	}
	FS_Read(pszFileData, nAddLen, 1, hFile);
	pszFileData[nAddLen] = 0;
	FS_Close(hFile);
	Con_DPrintf("execing %s\n", pszFileName);
	if (cmd_text.cursize + nAddLen + 2 < cmd_text.maxsize)
	{
		// Fits: insert the whole script at the front of the command buffer.
		Cbuf_InsertTextLines(pszFileData);
	}
	else
	{
		// Too big for the buffer: execute what's queued, then feed the script
		// one parsed line at a time until COM_ParseLine yields an empty token.
		char *pszDataPtr = pszFileData;
		while (true)
		{
			Cbuf_Execute();	// TODO: This doesn't obey the rule to first execute commands from the file, and then the others in the buffer
			pszDataPtr = COM_ParseLine(pszDataPtr);
			if (com_token[0] == 0)
			{
				break;
			}
			Cbuf_InsertTextLines(com_token);
		}
	}
	Mem_Free(pszFileData);
}
void RandUniList( long count, long first, long last, RandListType list[])
{
/*-------------------------------------------
 Fill list[] with `count` distinct random integers drawn uniformly from
 [first, last], inclusive. The results are non-repeating and not
 necessarily ordered. Only valid for a uniform distribution, but fast
 for any value of `count`.

 cwb - 6/27/00
 -------------------------------------------*/
	long idx, pick, held, span, *pool;

	span = last - first + 1;

	/* impossible request: more values than the range holds, or empty range */
	if (count > span || span <= 0) {
		fprintf(stderr, "Programmer error in RandUniList: "
		        "count > range || range <= 0\n");
		exit (-1);
	}

	/* request covers the whole range: enumerate it and be done */
	if (count == span) {
		for (idx = 0; idx < count; idx++) {
			list[idx] = first + idx;
		}
		return;
	}

	/* one or two values: draw directly, retrying the second until distinct */
	if (count <= 2) {
		list[0] = (long) RandUniRange(first, last);
		if (count == 2) {
			do {
				list[1] = RandUniRange(first, last);
			} while (list[1] == list[0]);
		}
		return;
	}

	/* general case: shuffle the whole range, then keep the first `count` */
	pool = (long *) Mem_Malloc(sizeof(long) * span, "RandUniList");

	/* seed the pool with every value in [first, last] */
	idx = 0;
	for (pick = first; pick <= last; pick++) {
		pool[idx] = pick;
		idx++;
	}

	/* shuffle: swap each slot with a randomly chosen *different* slot */
	for (idx = 0; idx < span; idx++) {
		do {
			pick = RandUniRange(0, span - 1);
		} while (pick == idx);
		held = pool[idx];
		pool[idx] = pool[pick];
		pool[pick] = held;
	}

	/* take `count` items off the top of the shuffled pool */
	for (idx = 0; idx < count; idx++) {
		list[idx] = pool[idx];
	}

	Mem_Free(pool);
}