/*
 * Patch the UTF table's "FileSize" and "ExtractSize" fields for entry i of
 * file[] in outfile, writing file[i].new_size (big-endian, 4 bytes) into both,
 * and report the size delta on stdout.
 */
void fix_file_sizes(FILE *outfile, const long offset, struct cpk_file *file, int i) {
	unsigned char be_size[4];
	/* Locate both size fields for this entry inside the UTF table. */
	long size_field = query_utf_offset(outfile, offset, file[i].index, "FileSize");
	long extract_field = query_utf_offset(outfile, offset, file[i].index, "ExtractSize");

	write_32_be(file[i].new_size, be_size);
	replace_data(outfile, size_field, be_size, 4);
	/* ExtractSize mirrors FileSize here (uncompressed entry assumed). */
	replace_data(outfile, extract_field, be_size, 4);

	printf("%s changed in size by %ld bytes\n", file[i].filename,
	       (long)(file[i].new_size-file[i].orig_size));
}
/*
 * Store a key/data pair under the semantics selected by flag
 * (TDB_INSERT fails if the key exists; TDB_MODIFY fails if it doesn't;
 * otherwise replace/create).  Returns a TDB_ERROR code, which is also
 * recorded in tdb->last_error.
 */
enum TDB_ERROR tdb_store(struct tdb_context *tdb,
			 struct tdb_data key, struct tdb_data dbuf, int flag)
{
	struct hash_info h;
	tdb_off_t off;
	tdb_len_t old_room = 0;
	struct tdb_used_record rec;
	enum TDB_ERROR ecode;

	/* Find any existing record and write-lock its hash range;
	 * h/rec are filled in for later use. */
	off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
	if (TDB_OFF_IS_ERR(off)) {
		/* Error is encoded in off itself; nothing was locked. */
		return tdb->last_error = off;
	}

	/* Now we have lock on this hash bucket. */
	if (flag == TDB_INSERT) {
		if (off) {
			/* INSERT must not overwrite an existing key. */
			ecode = TDB_ERR_EXISTS;
			goto out;
		}
	} else {
		if (off) {
			/* Total space available in the existing record:
			 * current data plus its trailing padding. */
			old_room = rec_data_length(&rec)
				+ rec_extra_padding(&rec);
			if (old_room >= dbuf.dsize) {
				/* Can modify in-place. Easy! */
				ecode = update_rec_hdr(tdb, off,
						       key.dsize, dbuf.dsize,
						       &rec, h.h);
				if (ecode != TDB_SUCCESS) {
					goto out;
				}
				/* Overwrite old data; leftover bytes
				 * (old_room - dbuf.dsize) become padding. */
				ecode = update_data(tdb,
						    off + sizeof(rec)
							+ key.dsize,
						    dbuf,
						    old_room - dbuf.dsize);
				if (ecode != TDB_SUCCESS) {
					goto out;
				}
				tdb_unlock_hashes(tdb, h.hlock_start,
						  h.hlock_range, F_WRLCK);
				return tdb->last_error = TDB_SUCCESS;
			}
		} else {
			if (flag == TDB_MODIFY) {
				/* if the record doesn't exist and we
				   are in TDB_MODIFY mode then we should fail
				   the store */
				ecode = TDB_ERR_NOEXIST;
				goto out;
			}
		}
	}

	/* If we didn't use the old record, this implies we're growing. */
	/* Final arg: nonzero off means an old record exists to supersede. */
	ecode = replace_data(tdb, &h, key, dbuf, off, old_room, off);
out:
	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
	return tdb->last_error = ecode;
}
/*
 * Store a key/data pair under the semantics selected by flag
 * (NTDB_INSERT fails if the key exists; NTDB_MODIFY fails if it doesn't;
 * otherwise replace/create).  Returns an NTDB_ERROR code.
 */
_PUBLIC_ enum NTDB_ERROR ntdb_store(struct ntdb_context *ntdb,
				    NTDB_DATA key, NTDB_DATA dbuf, int flag)
{
	struct hash_info h;
	ntdb_off_t off;
	ntdb_len_t old_room = 0;
	struct ntdb_used_record rec;
	enum NTDB_ERROR ecode;

	/* Find any existing record and write-lock its hash bucket;
	 * h/rec are filled in for later use. */
	off = find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL);
	if (NTDB_OFF_IS_ERR(off)) {
		/* Error is encoded in off itself; nothing was locked. */
		return NTDB_OFF_TO_ERR(off);
	}

	/* Now we have lock on this hash bucket. */
	if (flag == NTDB_INSERT) {
		if (off) {
			/* INSERT must not overwrite an existing key. */
			ecode = NTDB_ERR_EXISTS;
			goto out;
		}
	} else {
		if (off) {
			/* Total space available in the existing record:
			 * current data plus its trailing padding. */
			old_room = rec_data_length(&rec)
				+ rec_extra_padding(&rec);
			if (old_room >= dbuf.dsize) {
				/* Can modify in-place. Easy! */
				ecode = update_rec_hdr(ntdb, off,
						       key.dsize, dbuf.dsize,
						       &rec);
				if (ecode != NTDB_SUCCESS) {
					goto out;
				}
				/* Overwrite old data; leftover bytes
				 * (old_room - dbuf.dsize) become padding. */
				ecode = update_data(ntdb,
						    off + sizeof(rec)
							+ key.dsize,
						    dbuf,
						    old_room - dbuf.dsize);
				if (ecode != NTDB_SUCCESS) {
					goto out;
				}
				ntdb_unlock_hash(ntdb, h.h, F_WRLCK);
				return NTDB_SUCCESS;
			}
		} else {
			if (flag == NTDB_MODIFY) {
				/* if the record doesn't exist and we
				   are in NTDB_MODIFY mode then we should fail
				   the store */
				ecode = NTDB_ERR_NOEXIST;
				goto out;
			}
		}
	}

	/* If we didn't use the old record, this implies we're growing. */
	/* Final arg: nonzero off means an old record exists to supersede. */
	ecode = replace_data(ntdb, &h, key, dbuf, off, old_room, off);
out:
	ntdb_unlock_hash(ntdb, h.h, F_WRLCK);
	return ecode;
}
/*
 * Recompute a stored offset field after files have shifted: read the original
 * offset (relative to `additional`), add the size deltas of every copied file
 * placed before it, write the adjusted value back (big-endian, 8 bytes), and
 * return the new absolute offset.
 */
uint64_t fix_offset(FILE *outfile, const long offset, const long additional,
		    int index, const char *name,
		    struct cpk_file *file, int file_count) {
	long old_offset = additional + query_utf_8byte(outfile, offset, index, name);
	long field_pos = query_utf_offset(outfile, offset, index, name);
	long adjusted = old_offset;
	unsigned char be_offset[8];
	int i;

	/* Accumulate deltas from copied files located before old_offset.
	 * NOTE(review): early exit presumes file[] is sorted by orig_offset
	 * — confirm against the caller. */
	for (i = 0; i < file_count && file[i].orig_offset < old_offset; i++) {
		if (file[i].copied)
			adjusted += file[i].offset_diff;
	}

	write_64_be(adjusted - additional, be_offset);
	replace_data(outfile, field_pos, be_offset, 8);
	return adjusted;
}
/*
 * Append dbuf to the data of an existing record (or create the record if the
 * key is absent).  Appends in place when the record's padding has room;
 * otherwise builds old+new data in a temporary buffer and relocates the
 * record.  Returns an NTDB_ERROR code.
 */
_PUBLIC_ enum NTDB_ERROR ntdb_append(struct ntdb_context *ntdb,
				     NTDB_DATA key, NTDB_DATA dbuf)
{
	struct hash_info h;
	ntdb_off_t off;
	struct ntdb_used_record rec;
	ntdb_len_t old_room = 0, old_dlen;
	unsigned char *newdata;
	NTDB_DATA new_dbuf;
	enum NTDB_ERROR ecode;

	/* Find any existing record and write-lock its hash bucket. */
	off = find_and_lock(ntdb, key, F_WRLCK, &h, &rec, NULL);
	if (NTDB_OFF_IS_ERR(off)) {
		/* Error is encoded in off itself; nothing was locked. */
		return NTDB_OFF_TO_ERR(off);
	}

	if (off) {
		old_dlen = rec_data_length(&rec);
		old_room = old_dlen + rec_extra_padding(&rec);

		/* Fast path: can append in place. */
		if (rec_extra_padding(&rec) >= dbuf.dsize) {
			ecode = update_rec_hdr(ntdb, off, key.dsize,
					       old_dlen + dbuf.dsize, &rec);
			if (ecode != NTDB_SUCCESS) {
				goto out;
			}

			/* Jump past the existing data and write dbuf there;
			 * remaining padding shrinks accordingly. */
			off += sizeof(rec) + key.dsize + old_dlen;
			ecode = update_data(ntdb, off, dbuf,
					    rec_extra_padding(&rec));
			goto out;
		}

		/* Slow path. */
		/* Build old data + appended data in one buffer, then
		 * relocate the record via replace_data below. */
		newdata = ntdb->alloc_fn(ntdb, key.dsize + old_dlen
					 + dbuf.dsize, ntdb->alloc_data);
		if (!newdata) {
			ecode = ntdb_logerr(ntdb, NTDB_ERR_OOM, NTDB_LOG_ERROR,
					    "ntdb_append:"
					    " failed to allocate %zu bytes",
					    (size_t)(key.dsize + old_dlen
						     + dbuf.dsize));
			goto out;
		}
		ecode = ntdb->io->tread(ntdb, off + sizeof(rec) + key.dsize,
					newdata, old_dlen);
		if (ecode != NTDB_SUCCESS) {
			goto out_free_newdata;
		}
		memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
		new_dbuf.dptr = newdata;
		new_dbuf.dsize = old_dlen + dbuf.dsize;
	} else {
		/* No existing record: this degenerates to a plain store. */
		newdata = NULL;
		new_dbuf = dbuf;
	}

	/* If they're using ntdb_append(), it implies they're growing record. */
	ecode = replace_data(ntdb, &h, key, new_dbuf, off, old_room, true);

out_free_newdata:
	/* free_fn on NULL is reached when no temp buffer was allocated. */
	ntdb->free_fn(newdata, ntdb->alloc_data);
out:
	ntdb_unlock_hash(ntdb, h.h, F_WRLCK);
	return ecode;
}
/*
 * Append dbuf to the data of an existing record (or create the record if the
 * key is absent).  Appends in place when the record's padding has room;
 * otherwise builds old+new data in a temporary buffer and relocates the
 * record.  Returns a TDB_ERROR code, also recorded in tdb->last_error.
 */
enum TDB_ERROR tdb_append(struct tdb_context *tdb,
			  struct tdb_data key, struct tdb_data dbuf)
{
	struct hash_info h;
	tdb_off_t off;
	struct tdb_used_record rec;
	tdb_len_t old_room = 0, old_dlen;
	unsigned char *newdata;
	struct tdb_data new_dbuf;
	enum TDB_ERROR ecode;

	/* Find any existing record and write-lock its hash range. */
	off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
	if (TDB_OFF_IS_ERR(off)) {
		/* Error is encoded in off itself; nothing was locked. */
		return tdb->last_error = off;
	}

	if (off) {
		old_dlen = rec_data_length(&rec);
		old_room = old_dlen + rec_extra_padding(&rec);

		/* Fast path: can append in place. */
		if (rec_extra_padding(&rec) >= dbuf.dsize) {
			ecode = update_rec_hdr(tdb, off, key.dsize,
					       old_dlen + dbuf.dsize, &rec,
					       h.h);
			if (ecode != TDB_SUCCESS) {
				goto out;
			}

			/* Jump past the existing data and write dbuf there;
			 * remaining padding shrinks accordingly. */
			off += sizeof(rec) + key.dsize + old_dlen;
			ecode = update_data(tdb, off, dbuf,
					    rec_extra_padding(&rec));
			goto out;
		}

		/* Slow path. */
		/* Build old data + appended data in one buffer, then
		 * relocate the record via replace_data below. */
		newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
		if (!newdata) {
			ecode = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
					   "tdb_append:"
					   " failed to allocate %zu bytes",
					   (size_t)(key.dsize + old_dlen
						    + dbuf.dsize));
			goto out;
		}
		ecode = tdb->methods->tread(tdb, off + sizeof(rec) + key.dsize,
					    newdata, old_dlen);
		if (ecode != TDB_SUCCESS) {
			goto out_free_newdata;
		}
		memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
		new_dbuf.dptr = newdata;
		new_dbuf.dsize = old_dlen + dbuf.dsize;
	} else {
		/* No existing record: this degenerates to a plain store. */
		newdata = NULL;
		new_dbuf = dbuf;
	}

	/* If they're using tdb_append(), it implies they're growing record. */
	ecode = replace_data(tdb, &h, key, new_dbuf, off, old_room, true);

out_free_newdata:
	/* free(NULL) is a harmless no-op when no temp buffer was allocated. */
	free(newdata);
out:
	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
	return tdb->last_error = ecode;
}