static __inline char * read_runtime_path(HANDLE h) { char buffer[TRAILER_SIZE]; static char runtime_path[MAX_PATH]; DWORD nread; int num_sections, path_size, i; long ofs; if (SetFilePointer(h, -TRAILER_SIZE, NULL, FILE_END) == -1) return NULL; if (! ReadFile(h, buffer, TRAILER_SIZE, &nread, NULL)) return NULL; if (nread != TRAILER_SIZE) return NULL; num_sections = read_size(buffer); ofs = TRAILER_SIZE + num_sections * 8; if (SetFilePointer(h, - ofs, NULL, FILE_END) == -1) return NULL; path_size = 0; for (i = 0; i < num_sections; i++) { if (! ReadFile(h, buffer, 8, &nread, NULL) || nread != 8) return NULL; if (buffer[0] == 'R' && buffer[1] == 'N' && buffer[2] == 'T' && buffer[3] == 'M') { path_size = read_size(buffer + 4); ofs += path_size; } else if (path_size > 0) ofs += read_size(buffer + 4); } if (path_size == 0) return default_runtime_name; if (path_size >= MAX_PATH) return NULL; if (SetFilePointer(h, -ofs, NULL, FILE_END) == -1) return NULL; if (! ReadFile(h, runtime_path, path_size, &nread, NULL)) return NULL; if (nread != path_size) return NULL; runtime_path[path_size - 1] = 0; return runtime_path; }
static char * read_runtime_path(int fd) { char buffer[TRAILER_SIZE]; static char runtime_path[MAXPATHLEN]; int num_sections, i; uint32 path_size; long ofs; lseek(fd, (long) -TRAILER_SIZE, SEEK_END); if (read(fd, buffer, TRAILER_SIZE) < TRAILER_SIZE) return NULL; num_sections = read_size(buffer); ofs = TRAILER_SIZE + num_sections * 8; lseek(fd, -ofs, SEEK_END); path_size = 0; for (i = 0; i < num_sections; i++) { if (read(fd, buffer, 8) < 8) return NULL; if (buffer[0] == 'R' && buffer[1] == 'N' && buffer[2] == 'T' && buffer[3] == 'M') { path_size = read_size(buffer + 4); ofs += path_size; } else if (path_size > 0) ofs += read_size(buffer + 4); } if (path_size == 0) return default_runtime_path; if (path_size >= MAXPATHLEN) return NULL; lseek(fd, -ofs, SEEK_END); if (read(fd, runtime_path, path_size) != path_size) return NULL; runtime_path[path_size - 1] = 0; return runtime_path; }
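Both launchers above decode the section count and the per-section sizes with a read_size helper that is not part of this listing. A minimal sketch, assuming the trailer stores these 32-bit values in big-endian byte order; the helper name below is mine:

#include <stdint.h>

/* Decode a 32-bit big-endian value from a 4-byte field (assumed layout). */
static uint32_t read_size_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8)  | (uint32_t)p[3];
}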
array_2d <T> operator()(std::istream& s) { array_2d <T> a(read_size(s), read_size(s)); read(a, s); return a; }
static size_t onRead(char *buffer, size_t size, size_t nitems, void *instream) { WebRequestInternalState *is_(reinterpret_cast<WebRequestInternalState*>(instream)); is_->state = HTTP_OPEN; if (is_->isAborted) { is_->state = HTTP_CLOSED; return CURL_READFUNC_ABORT; } // Find the size in bytes. size_t real_size(size * nitems); // Read as much as we can from the upload buffer queue. Deserializer* upload(dynamic_cast<Deserializer*>(is_->upload.Get())); size_t size_queued(upload->GetSize()); size_t size_left(real_size); if ((size_left > 0) && (size_queued > 0)) { size_t read_size(std::min(size_queued, size_left)); upload->Read(buffer, (unsigned int)read_size); size_left -= read_size; } // If we still have bytes to fill, then emit a "upload_chunk" event. if (size_left > 0) { VariantMap eventData; eventData.Insert(MakePair(StringHash("upload"), Variant(is_->upload))); eventData.Insert(MakePair(StringHash("size"), Variant((unsigned int)size_left))); is_->es.SendEvent("upload_chunk", eventData); } // Read as much as we can from the upload buffer queue (again). size_queued = upload->GetSize(); size_left = real_size; if ((size_left > 0) && (size_queued > 0)) { size_t read_size(std::min(size_queued, size_left)); upload->Read(buffer, (unsigned int)read_size); size_left -= read_size; } // If we still have bytes to fill, then something went wrong, so we should abort. if (size_left > 0) { is_->isAborted = true; return CURL_READFUNC_ABORT; } return real_size; }
static int read_trailer(int fd, struct exec_trailer * trail) { unsigned char buffer[TRAILER_SIZE]; lseek(fd, (long) -TRAILER_SIZE, SEEK_END); if (read(fd, (char*)buffer, TRAILER_SIZE) < TRAILER_SIZE) return TRUNCATED_FILE; trail->code_size = read_size(buffer); trail->data_size = read_size(buffer+4); trail->symbol_size = read_size(buffer+8); trail->debug_size = read_size(buffer+12); trail->magic = read_size(buffer+16); if (trail->magic == EXEC_MAGIC) return 0; else return BAD_MAGIC_NUM; }
array <array <T> > operator()(std::istream& s) { size_t arr = read_size(s); // number of arrays array <array<T> > a(arr); size_t dim; // dimension of each array for (size_t n = 0; n < arr; n++) { dim = read_size(s); a[n].init(dim); read(a[n], s); } return a; }
int parse_args(RTSOpts * opts, int argc, char *argv[]) { if (argc == 0) return 0; if (strcmp(argv[0], "+RTS") != 0) return 0; int i; for (i = 1; i < argc; i++) { if (strcmp(argv[i], "-RTS") == 0) { return i + 1; } if (argv[i][0] != '-') { fprintf(stderr, "RTS options should start with '-'.\n"); print_usage(stderr); exit(EXIT_FAILURE); } switch (argv[i][1]) { case '?': print_usage(stdout); exit(EXIT_SUCCESS); break; case 's': opts->show_summary = 1; break; case 'H': opts->init_heap_size = read_size(argv[i] + 2); break; case 'K': opts->max_stack_size = read_size(argv[i] + 2); break; default: printf("RTS opts: Wrong argument: %s\n", argv[i]); print_usage(stderr); exit(EXIT_FAILURE); } } return argc; }
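The -H and -K options above pass the remainder of the argument (for example "-H64m") to read_size, whose implementation is not shown. A minimal sketch, assuming it accepts a decimal byte count with an optional k/K, m/M or g/G suffix; both the suffix rules and the helper name are assumptions:

#include <stdlib.h>

/* Parse "123", "64k", "256M" or "1g" into a byte count (assumed suffix rules). */
static unsigned long parse_rts_size(const char *arg)
{
    char *end;
    unsigned long n = strtoul(arg, &end, 10);
    switch (*end) {
    case 'k': case 'K': n *= 1024UL; break;
    case 'm': case 'M': n *= 1024UL * 1024UL; break;
    case 'g': case 'G': n *= 1024UL * 1024UL * 1024UL; break;
    default: break;
    }
    return n;
}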
/* Insert data into the packet queue. The data follows the packet layout, i.e. a two-byte length immediately followed by the payload, and is parsed to work out how many packets it contains. * Complete packets are pushed onto the complete-packet queue; incomplete ones are stored in the hash table of incomplete packets. In particular, for an * incomplete packet of only 1 byte the packet length cannot be computed yet, so the incomplete entry's read is set to -1 and header holds that single byte. * The next time the socket receives data, read is inspected so the packet can be stitched together correctly. * * Parameters: L is the Lua VM stack; fd is the id of the socket the data belongs to; buffer is the data; size is the data length. * The function returns nothing. */ static void push_more(lua_State *L, int fd, uint8_t *buffer, int size) { if (size == 1) { struct uncomplete * uc = save_uncomplete(L, fd); uc->read = -1; uc->header = *buffer; return; } int pack_size = read_size(buffer); buffer += 2; size -= 2; /* the payload is incomplete here, but pack.buffer is allocated at the full packet size */ if (size < pack_size) { struct uncomplete * uc = save_uncomplete(L, fd); uc->read = size; uc->pack.size = pack_size; uc->pack.buffer = skynet_malloc(pack_size); memcpy(uc->pack.buffer, buffer, size); return; } push_data(L, fd, buffer, pack_size, 1); buffer += pack_size; size -= pack_size; if (size > 0) { push_more(L, fd, buffer, size); } }
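Here read_size turns the two-byte packet header into a payload length. filter_data_ further on in this listing reassembles a split header as uc->header << 8 | *buffer, so the framing is big-endian; a sketch consistent with that:

#include <stdint.h>

/* 2-byte packet header: high byte first, then low byte (big-endian),
   matching the header reassembly done in filter_data_. */
static inline int read_size(uint8_t *buffer)
{
    return ((int)buffer[0] << 8) | (int)buffer[1];
}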
array_2d <T> operator()(std::istream& s) { size_t arr = read_size(s); // number of arrays if (arr < 1) return array_2d <T>(); size_t dim = read_size(s); // dimension of each array array_2d <T> a(dim, arr); for (size_t n = 0, p = 0; n < arr; n++, p += dim) { if(n > 0) dim = read_size(s); read(a[p], s, dim); } return a; }
static void read_file(t_env *e) { int fd; if ((fd = open("src/maps/map01.txt", O_RDONLY)) == -1) exit(EXIT_FAILURE); read_size(e, fd); read_map(e, fd); close(fd); }
std::string Parser::read_item(std::istream &stream) { // An item in an itemized chunk is made up of the big endian size and the payload of that size // Example: // 0000: 4 // 0004: 0xDE 0xAD 0xBE 0xEF // 0008: --- Next item --- auto payload = read_payload(stream, read_size(stream)); return {std::begin(payload), std::end(payload)}; }
std::pair<ChunkId, std::string> Parser::read_chunk(std::istream &stream) { // LastPass blob chunk is made up of 4-byte ID, big endian 4-byte size and payload of that size // Example: // 0000: 'IDID' // 0004: 4 // 0008: 0xDE 0xAD 0xBE 0xEF // 000C: --- Next chunk --- auto id = read_id(stream); auto payload = read_payload(stream, read_size(stream)); return std::make_pair(id, payload); }
static domain_type * read_domain(namedb_type *db, uint32_t domain_count, domain_type **domains) { uint32_t domain_number; if (!read_size(db, &domain_number)) return NULL; if (domain_number == 0 || domain_number > domain_count) return NULL; return domains[domain_number - 1]; }
static zone_type * read_zone(namedb_type *db, uint32_t zone_count, zone_type **zones) { uint32_t zone_number; if (!read_size(db, &zone_number)) return NULL; if (zone_number == 0 || zone_number > zone_count) return NULL; return zones[zone_number - 1]; }
/* Func: read_from_stat() * This function creates a stat struct, * reads the file information via fstat(), * checks for errors, * and calls helper functions to read the file's info: * timestamp * user_id, group_id * mode, size, numintro */ void read_from_stat(file *info){ int t=0; struct stat fileinfo; t = fstat(info->desc,&fileinfo); //perror("File Stat"); if(t==-1){ printf(CY"Error: File stat cannot be read. \nProgram is now exiting!\n"NC); exit(EXIT_FAILURE); } read_time(info, fileinfo); read_ids(info, fileinfo); read_mode(info, fileinfo); read_size(info, fileinfo); }
int SlimConnectionHandler_Run(SlimConnectionHandler* self) { if (self->sendFunc(self->comLink, "Slim -- V0.0\n", 13) == -1) { return -1; } char* message = NULL; while(1) { int size_i = read_size(self); if (size_i > 0) { free(message); message = (char*)malloc(size_i + (size_t)1); memset(message, 0, (size_t)size_i + (size_t)1); int numbytes = self->recvFunc(self->comLink, message, size_i); if (numbytes != size_i) { printf("did not receive right number of bytes. %d expected but received %d\n", size_i, numbytes); break; } if (strcmp("bye", message) == 0) { break; } //execute and get response char* response_message = self->slimHandlerFunc(self->slimHandler, message); int response_length = (int)strlen(response_message); char * length_buffer = (char *)malloc((size_t)8); sprintf(length_buffer, "%06d:", response_length); int send_result = self->sendFunc(self->comLink, length_buffer, 7); free(length_buffer); send_result = self->sendFunc(self->comLink, response_message, response_length); SlimList_Release(response_message); } else if (size_i == -1) { break; } } free(message); fflush(stdout); return 0; }
int SlimConnectionHandler_Run(SlimConnectionHandler* self) { char * message = malloc(3); message[0] = 0; int numbytes; if (self->sendFunc(self->comLink, "Slim -- V0.0\n", 13) == -1) { return -1; } while(1) { int size_i = read_size(self); if (size_i > 0) { free(message); message = malloc(size_i + 1); memset(message, 0, size_i + 1); if ((numbytes = self->recvFunc(self->comLink, message, size_i)) == -1) break; if (strcmp("bye", message) == 0) break; //execute and get response char* response_message = self->slimHandler(message); int response_length = strlen(response_message); char * response = malloc(response_length + 7 + 1); sprintf(response, "%06d:%s", response_length, response_message); free(response_message); int send_result = self->sendFunc(self->comLink, response, response_length + 7); free(response); if ( send_result == -1) break; } } free(message); return 0; }
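Both SlimConnectionHandler_Run variants frame messages as a six-digit, zero-padded decimal length followed by ':' (mirroring the "%06d:" prefix they write back), and rely on read_size to decode the incoming prefix. A minimal sketch of just that decoding step, under the same framing assumption and with a hypothetical helper name:

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

/* Decode a Slim length prefix of the form "NNNNNN:" (six decimal digits
   followed by a colon); returns -1 if the prefix is malformed. */
static int decode_slim_length(const char prefix[7])
{
    char digits[7];
    int i;
    if (prefix[6] != ':')
        return -1;
    memcpy(digits, prefix, 6);
    digits[6] = '\0';
    for (i = 0; i < 6; i++)
        if (!isdigit((unsigned char)digits[i]))
            return -1;
    return atoi(digits);
}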
static void json_process_field(int indx, void *data, struct format_field *field) { unsigned long long val; if (field->flags & FIELD_IS_STRING) { int offset; if (field->flags & FIELD_IS_DYNAMIC) { offset = *(int *)(data + field->offset); offset &= 0xffff; } else offset = field->offset; fprintf(ofp, "%s\"%s\"", prefix(indx), (char *)data + offset); } else { /* FIELD_IS_NUMERIC */ val = read_size(data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) fprintf(ofp, "%s%lld", prefix(indx), (long long int) val); else fprintf(ofp, "%s%llu", prefix(indx), val); } }
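In this trace-field printer (and in the Perl and Python event handlers later in the listing), read_size extracts a numeric field of 1, 2, 4 or 8 bytes from the raw event record. A minimal sketch with a name of my own, assuming the data is already in host byte order; a production implementation may also need to byte-swap for cross-endian traces, which is omitted here:

#include <stdint.h>
#include <string.h>

/* Read an integer field of the given width from raw event data
   (host byte order assumed). */
static unsigned long long read_field(void *ptr, int size)
{
    unsigned long long val = 0;

    switch (size) {
    case 1: { uint8_t  v; memcpy(&v, ptr, 1); val = v; break; }
    case 2: { uint16_t v; memcpy(&v, ptr, 2); val = v; break; }
    case 4: { uint32_t v; memcpy(&v, ptr, 4); val = v; break; }
    case 8: { uint64_t v; memcpy(&v, ptr, 8); val = v; break; }
    default: break;
    }
    return val;
}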
/// Read data from buffer. If buffer holds enough data, the payload is pushed onto queue.queue; otherwise it is stored in queue.hash. static void push_more(lua_State *L, int fd, uint8_t *buffer, int size) { // not even enough data for the header: stash the first header byte if (size == 1) { struct uncomplete * uc = save_uncomplete(L, fd); uc->read = -1; uc->header = *buffer; return; } // read the packet payload size int pack_size = read_size(buffer); // advance to the actual payload buffer += 2; // remaining size of the actual payload size -= 2; // the payload has not been fully received if (size < pack_size) { struct uncomplete * uc = save_uncomplete(L, fd); uc->read = size; // record how much has been read uc->pack.size = pack_size; // record the payload size uc->pack.buffer = skynet_malloc(pack_size); memcpy(uc->pack.buffer, buffer, size); return; } // the payload is complete: read it and push it onto queue.queue push_data(L, fd, buffer, pack_size, 1); // continue with the remaining data buffer += pack_size; size -= pack_size; if (size > 0) { push_more(L, fd, buffer, size); } }
/* [lua_api] Hand the data received from a socket back to the Lua layer. The function first checks the queue's hash table of incomplete packets for an * incomplete packet belonging to socket fd. If one exists, it checks how much has already been read and copies the missing part over; if that yields exactly * one packet, the string "data" and the packet are returned to the Lua layer directly, and if the copy produces more than one packet, the packets are all * inserted into the queue and the string "more" is returned to the Lua layer. If there is still less than one packet, the incomplete packet is put back into * the hash table. When there was no incomplete packet beforehand, the same flow applies. When "more" is returned, the packets in the queue can be fetched * with the lpop function. The contents of buffer are copied into newly allocated memory, so the caller must free buffer after this function returns. * * Parameters: L is the Lua VM stack, with the queue structure at stack slot 1; fd is the socket id; buffer is the data; size is the data length. * * Returns: userdata[1] is the queue structure; string/nil[2] is "more" or "data" when data is available, or nil when the data is incomplete; * int[3] is the socket id, returned only for "data"; lightuserdata[4] is the payload, returned only for "data"; * int[5] is the payload size, returned only for "data". */ static int filter_data_(lua_State *L, int fd, uint8_t * buffer, int size) { struct queue *q = lua_touserdata(L,1); struct uncomplete * uc = find_uncomplete(q, fd); if (uc) { // fill uncomplete if (uc->read < 0) { // read size assert(uc->read == -1); int pack_size = *buffer; pack_size |= uc->header << 8 ; ++buffer; --size; uc->pack.size = pack_size; uc->pack.buffer = skynet_malloc(pack_size); uc->read = 0; } int need = uc->pack.size - uc->read; if (size < need) { memcpy(uc->pack.buffer + uc->read, buffer, size); uc->read += size; int h = hash_fd(fd); uc->next = q->hash[h]; q->hash[h] = uc; return 1; } memcpy(uc->pack.buffer + uc->read, buffer, need); buffer += need; size -= need; if (size == 0) { lua_pushvalue(L, lua_upvalueindex(TYPE_DATA)); lua_pushinteger(L, fd); lua_pushlightuserdata(L, uc->pack.buffer); lua_pushinteger(L, uc->pack.size); skynet_free(uc); return 5; } // more data push_data(L, fd, uc->pack.buffer, uc->pack.size, 0); skynet_free(uc); push_more(L, fd, buffer, size); lua_pushvalue(L, lua_upvalueindex(TYPE_MORE)); return 2; } else { if (size == 1) { struct uncomplete * uc = save_uncomplete(L, fd); uc->read = -1; uc->header = *buffer; return 1; } int pack_size = read_size(buffer); buffer+=2; size-=2; if (size < pack_size) { struct uncomplete * uc = save_uncomplete(L, fd); uc->read = size; uc->pack.size = pack_size; uc->pack.buffer = skynet_malloc(pack_size); memcpy(uc->pack.buffer, buffer, size); return 1; } if (size == pack_size) { // just one package lua_pushvalue(L, lua_upvalueindex(TYPE_DATA)); lua_pushinteger(L, fd); void * result = skynet_malloc(pack_size); memcpy(result, buffer, size); lua_pushlightuserdata(L, result); lua_pushinteger(L, size); return 5; } // more data push_data(L, fd, buffer, pack_size, 1); buffer += pack_size; size -= pack_size; push_more(L, fd, buffer, size); lua_pushvalue(L, lua_upvalueindex(TYPE_MORE)); return 2; } }
static int process_part(const char *data, size_t len) { size_t maxlen; size_t amount_read = 0; switch(sm.state) { case IN_HEADER: maxlen = (sizeof sm.header - sm.data_read); if (maxlen > len) maxlen = len; memcpy(((char *)&sm.header) + sm.data_read, data, maxlen); sm.data_read += maxlen; amount_read = maxlen; sm.total_data_read += amount_read; break; case IN_LONG_FILENAME: maxlen = sm.filedata_left; if (maxlen > len) maxlen = len; assert(sm.longfilename); memcpy(sm.longfilename + sm.data_read, data, maxlen); sm.data_read += maxlen; sm.filedata_left -= maxlen; sm.until_header_left -= maxlen; amount_read = maxlen; sm.total_data_read += amount_read; if (sm.filedata_left == 0) { if (strlen(sm.longfilename) > sm.data_read) fatal_error("index_from_tar: Wrong long-filename length"); sm.state = IN_EXTRA_DATA; sm.data_read = 0; /* Here we don't update the smblock! we want to * keep it as the start of the long name header */ } break; case IN_EXTRA_DATA: maxlen = sm.until_header_left; if (maxlen > len) maxlen = len; sm.data_read += maxlen; sm.until_header_left -= maxlen; amount_read = maxlen; sm.total_data_read += amount_read; if (sm.until_header_left == 0) { sm.state = IN_HEADER; sm.data_read = 0; advance_block(); } break; case IN_DATA: maxlen = sm.filedata_left; if (maxlen > len) maxlen = len; sm.data_read += maxlen; sm.filedata_left -= maxlen; sm.until_header_left -= maxlen; amount_read = maxlen; if (rsync_signature && maxlen > 0) rsync_signature_work(rsync_signature, data, maxlen); sm.total_data_read += amount_read; if (command_line.debug > 2) fprintf(stderr, "index_from_tar: still %lli to skip\n", sm.filedata_left); if (sm.filedata_left == 0) { sm.state = IN_EXTRA_DATA; sm.data_read = 0; advance_block(); if (rsync_signature) { size_t siglen; size_t len; int res; /* Mark end of file to rsync */ rsync_signature_work(rsync_signature, 0, 0); /* Should output the block name too */ siglen = rsync_signature_size(rsync_signature); len = siglen + strlen(index_filename) + 1 /*\0*/; mytar_set_size(indextar, len); mytar_write_header(indextar); /* The name, and \0 */ res = mytar_write_data(indextar, index_filename, strlen(index_filename) + 1); if (res == -1) error("Cannot write index tar 1 - mytar_write_data"); /* Then the rsync signature */ res = mytar_write_data(indextar, rsync_signature->output, siglen); if (res == -1) error("Cannot write index tar 2 - mytar_write_data"); rsync_signature_free(rsync_signature); rsync_signature = 0; res = mytar_write_end(indextar); if (res == -1) error("Cannot write index tar - mytar_write_end"); } } break; } if (sm.state == IN_HEADER && sm.data_read == sizeof sm.header) { const struct header_gnu_tar *sh = &sm.header; int checksum; unsigned long long size; int should_rsync = 0; /* Process the header */ if (sh->checksum[0] == '\0') { /* Skip block, it should be final zero block. We could check it. 
*/ sm.data_read = 0; sm.state = IN_HEADER; return amount_read; } checksum = read_octal_number(sh->checksum, sizeof sh->checksum); if (calc_checksum(sh) != checksum) error("Failed checksum interpreting tar"); size = read_size(sh->size); sm.filedata_left = size; sm.until_header_left = size; /* Clamp at 512 bytes */ if (sm.until_header_left % 512 > 0) sm.until_header_left += 512 - sm.until_header_left % 512; if (sh->typeflag[0] == 'L') { sm.longfilename = malloc(sm.filedata_left+1); if (!sm.longfilename) fatal_error("Cannot allocate"); sm.longfilename[sm.filedata_left] = '\0'; sm.state = IN_LONG_FILENAME; sm.data_read = 0; return amount_read; } if (sh->typeflag[0] == 'K') /* Long link name. We ignore it */ { sm.state = IN_DATA; sm.data_read = 0; return amount_read; } /* Check that we parse a GNU tar archive, * and that we know the type. */ if (strncmp(sh->magic, "ustar ", 6) != 0 || strncmp(sh->version, " \0", 2) != 0 || (sh->typeflag[0] != '0' && sh->typeflag[0] != '5' && sh->typeflag[0] != '2' && sh->typeflag[0] != '1' )) { /* Unknown file. Skip data. */ char *fname; fname = read_fixed_size_string(sh->name, sizeof sh->name); fprintf(stderr, "Unknown header type, skipping %llu bytes: %s\n", sm.until_header_left, fname); free(fname); sm.data_read = 0; if (sm.filedata_left > 0) sm.state = IN_DATA; else { sm.state = IN_HEADER; advance_block(); } return amount_read; } mytar_new_file(indextar); if (sm.longfilename) { if (command_line.debug > 1) fprintf(stderr, "Writing index entry for the file %s\n", sm.longfilename); mytar_set_filename(indextar, sm.longfilename); free(sm.longfilename); sm.longfilename = 0; } else { char name[sizeof sh->name + 1]; strcpyn(name, sh->name, sizeof sh->name); if (command_line.debug > 1) fprintf(stderr, "Writing index entry for the file %s\n", name); mytar_set_filename(indextar, name); } mytar_set_mode(indextar, read_octal_number(sh->mode, sizeof sh->mode)); mytar_set_size(indextar, 0); mytar_set_uid(indextar, read_octal_number(sh->uid, sizeof sh->uid)); mytar_set_gid(indextar, read_octal_number(sh->gid, sizeof sh->gid)); mytar_set_uname(indextar, sh->uname); /* internal strncopy will save harm */ mytar_set_gname(indextar, sh->gname); /* internal strncopy will save harm */ switch(sh->typeflag[0]) { case '5': mytar_set_filetype(indextar, S_IFDIR); break; case '0': if (command_line.should_rsync && sm.filedata_left > command_line.rsync_minimal_size) should_rsync = 1; if (should_rsync) mytar_set_filetype(indextar, S_IFREG); else mytar_set_filetype(indextar, S_IFLNK); break; case '1': case '2': mytar_set_filetype(indextar, S_IFLNK); break; default: error("Indexing unknown file type"); } mytar_set_mtime(indextar, read_octal_number(sh->mtime, sizeof sh->mtime)); mytar_set_atime(indextar, read_octal_number(sh->mtime, sizeof sh->mtime)); { /* Start of block file - header */ snprintf(index_filename, sizeof index_filename, "block%zu.tar%s_%llu", sm.block, get_filter_extensions(filter), (unsigned long long) size); if (sh->typeflag[0] != '5' && !should_rsync) mytar_set_linkname(indextar, index_filename); } if (should_rsync) { assert(rsync_signature == 0); rsync_signature = rsync_signature_new(); } else mytar_write_header(indextar); sm.data_read = 0; if (sm.filedata_left > 0) { if (command_line.debug > 1) fprintf(stderr, "index_from_tar: Going to skip %llu bytes\n", sm.filedata_left); sm.state = IN_DATA; } else { sm.state = IN_HEADER; advance_block(); } } return amount_read; }
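read_size in process_part decodes the size field of a GNU tar header. A minimal sketch, assuming the conventional encoding for that field: octal ASCII, or base-256 when the high bit of the first byte is set (as GNU tar uses for very large files); the 12-byte field width and the helper name are also assumptions:

/* Decode a tar header size field: octal ASCII, or base-256 ("binary")
   when the high bit of the first byte is set. 12-byte field assumed. */
static unsigned long long read_tar_size(const char *field)
{
    const unsigned char *p = (const unsigned char *)field;
    unsigned long long val = 0;
    int i;

    if (p[0] & 0x80) {              /* base-256 encoding */
        val = p[0] & 0x7f;
        for (i = 1; i < 12; i++)
            val = (val << 8) | p[i];
        return val;
    }
    for (i = 0; i < 12 && p[i] >= '0' && p[i] <= '7'; i++)
        val = (val << 3) + (unsigned long long)(p[i] - '0');
    return val;
}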
static void request_start(struct client_context *client, char *req_buf) { struct socks5_request *req = (struct socks5_request *)req_buf; struct remote_context *remote = client->remote; assert(remote->stage == XSTAGE_FORWARD); client->cmd = req->cmd; if (req->cmd != S5_CMD_CONNECT && req->cmd != S5_CMD_UDP_ASSOCIATE) { logger_log(LOG_ERR, "unsupported cmd: 0x%02x", req->cmd); request_ack(client, S5_REP_CMD_NOT_SUPPORTED); return; } if (req->cmd == S5_CMD_UDP_ASSOCIATE) { request_ack(client, S5_REP_SUCCESSED); return; } char buf[260] = {0}; size_t buflen; uint16_t portlen = 2; /* * * xsocks request * +------+----------+----------+ * | ATYP | BND.ADDR | BND.PORT | * +------+----------+----------+ * | 1 | Variable | 2 | * +------+----------+----------+ * */ if (req->atyp == ATYP_IPV4) { size_t in_addr_len = sizeof(struct in_addr); buflen = sizeof(struct xsocks_request) + in_addr_len + portlen; buf[0] = ATYP_IPV4; memcpy(buf + 1, req->addr, in_addr_len); memcpy(buf + 1 + in_addr_len, req->addr + in_addr_len, portlen); uv_inet_ntop(AF_INET, (const void *)(req->addr), client->target_addr, INET_ADDRSTRLEN); uint16_t port = read_size((uint8_t*)(req->addr + in_addr_len)); sprintf(client->target_addr, "%s:%u", client->target_addr, port); } else if (req->atyp == ATYP_HOST) { uint8_t namelen = *(uint8_t *)(req->addr); if (namelen > 0xFF) { logger_log(LOG_ERR, "unsupported address type: 0x%02x", req->atyp); request_ack(client, S5_REP_ADDRESS_TYPE_NOT_SUPPORTED); return; } buflen = sizeof(struct xsocks_request) + 1 + namelen + portlen; buf[0] = ATYP_HOST; memcpy(buf + 1, req->addr, 1 + namelen); memcpy(buf + 1 + 1 + namelen, req->addr + 1 + namelen, portlen); memcpy(client->target_addr, req->addr + 1, namelen); uint16_t port = read_size((uint8_t*)(req->addr + 1 + namelen)); sprintf(client->target_addr, "%s:%u", client->target_addr, port); } else if (req->atyp == ATYP_IPV6) { size_t in6_addr_len = sizeof(struct in6_addr); buflen = sizeof(struct xsocks_request) + in6_addr_len + portlen; buf[0] = ATYP_IPV6; memcpy(buf + 1, req->addr, in6_addr_len); memcpy(buf + 1 + in6_addr_len, req->addr + in6_addr_len, portlen); uv_inet_ntop(AF_INET6, (const void *)(req->addr), client->target_addr, INET_ADDRSTRLEN); uint16_t port = read_size((uint8_t*)(req->addr + in6_addr_len)); sprintf(client->target_addr, "%s:%u", client->target_addr, port); } else { logger_log(LOG_ERR, "unsupported address type: 0x%02x", req->atyp); request_ack(client, S5_REP_ADDRESS_TYPE_NOT_SUPPORTED); return; } request_ack(client, S5_REP_SUCCESSED); // TODO: handle UDP ASSOCIATE if (req->cmd == S5_CMD_CONNECT) { if (verbose) { logger_log(LOG_INFO, "connect to %s", client->target_addr); } int clen = buflen + PRIMITIVE_BYTES; uint8_t *c = client->buf + HEADER_BYTES; int rc = crypto_encrypt(c, (uint8_t *)buf, buflen); if (!rc) { forward_to_remote(remote, c, clen); } } }
static void perl_process_event(int cpu, void *data, int size __unused, unsigned long long nsecs, char *comm) { struct format_field *field; static char handler[256]; unsigned long long val; unsigned long s, ns; struct event *event; int type; int pid; dSP; type = trace_parse_common_type(data); event = find_cache_event(type); if (!event) die("ug! no event found for type %d", type); pid = trace_parse_common_pid(data); sprintf(handler, "%s::%s", event->system, event->name); s = nsecs / NSECS_PER_SEC; ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(s))); XPUSHs(sv_2mortal(newSVuv(ns))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); /* common fields other than pid can be accessed via xsub fns */ for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { int offset; if (field->flags & FIELD_IS_DYNAMIC) { offset = *(int *)(data + field->offset); offset &= 0xffff; } else offset = field->offset; XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); } else { /* FIELD_IS_NUMERIC */ val = read_size(data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { XPUSHs(sv_2mortal(newSViv(val))); } else { XPUSHs(sv_2mortal(newSVuv(val))); } } } PUTBACK; if (get_cv(handler, 0)) call_pv(handler, G_SCALAR); else if (get_cv("main::trace_unhandled", 0)) { XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(nsecs))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); call_pv("main::trace_unhandled", G_SCALAR); } SPAGAIN; PUTBACK; FREETMPS; LEAVE; }
static void parse_content(int level, int64_t end_pos) { while (static_cast<int64_t>(g_in->getFilePointer()) < end_pos) { int64_t element_start_pos = g_in->getFilePointer(); try { vint_c id = read_id(end_pos); vint_c size = read_size(end_pos); std::string element_name = g_element_names[id.value]; if (element_name.empty()) element_name = Y("unknown"); mxinfo(boost::format(Y("%1%pos %2% id 0x%|3$x| size %4% header size %5% (%6%)\n")) % level_string(level) % element_start_pos % id.value % size.value % (id.coded_size + size.coded_size) % element_name); if (size.is_unknown()) { mxinfo(boost::format(Y("%1% Warning: size is coded as 'unknown' (all bits are set)\n")) % level_string(level)); // In Matroska segments often have an unknown size – so don't // warn about it. if (element_name != "Segment") g_warnings_found = true; } int64_t content_end_pos = size.is_unknown() ? end_pos : g_in->getFilePointer() + size.value; if (content_end_pos > end_pos) { mxinfo(boost::format(Y("%1% Error: Element ends after scope\n")) % level_string(level)); g_errors_found = true; if (!g_in->setFilePointer2(end_pos)) mxerror(boost::format(Y("Error: Seek to %1%\n")) % end_pos); return; } if (g_is_master[id.value]) parse_content(level + 1, content_end_pos); if (!g_in->setFilePointer2(content_end_pos)) mxerror(boost::format(Y("Error: Seek to %1%\n")) % content_end_pos); } catch (id_error_c &error) { std::string message = id_error_c::end_of_file == error.code ? Y("End of file") : id_error_c::end_of_scope == error.code ? Y("End of scope") : id_error_c::first_byte_is_zero == error.code ? Y("First byte is zero") : id_error_c::longer_than_four_bytes == error.code ? Y("ID is longer than four bytes") : Y("reason is unknown"); mxinfo(boost::format(Y("%1%Error at %2%: error reading the element ID (%3%)\n")) % level_string(level) % element_start_pos % message); g_errors_found = true; if (!g_in->setFilePointer2(end_pos)) mxerror(boost::format(Y("Error: Seek to %1%\n")) % end_pos); return; } catch (size_error_c &error) { std::string message = size_error_c::end_of_file == error.code ? Y("End of file") : size_error_c::end_of_scope == error.code ? Y("End of scope") : Y("reason is unknown"); mxinfo(boost::format(Y("%1%Error at %2%: error reading the element size (%3%)\n")) % level_string(level) % element_start_pos % message); g_errors_found = true; if (!g_in->setFilePointer2(end_pos)) mxerror(boost::format(Y("Error: Seek to %1%\n")) % end_pos); return; } catch (...) { mxerror(Y("Unknown error occured\n")); } } }
int write_entries(const int fd, struct tar_t ** archive, struct tar_t ** head, const size_t filecount, const char * files[], int * offset, const char verbosity){ if (fd < 0){ return -1; } if (!archive || *archive){ return -1; } if (filecount && !files){ return -1; } // add new data struct tar_t ** tar = archive; // current entry char buf[512]; // one block buffer for(unsigned int i = 0; i < filecount; i++){ *tar = malloc(sizeof(struct tar_t)); // stat file if (format_tar_data(*tar, files[i], verbosity) < 0){ WRITE_ERROR(stderr, "Error: Failed to stat %s\n", files[i]); } if (!i){ *archive = *tar; // store first address } (*tar) -> begin = *offset; // write different data depending on file type if ((*tar) -> type == DIRECTORY){ // save parent directory name (source will change) const size_t len = strlen((*tar) -> name); char * parent = calloc(len + 1, sizeof(char)); strncpy(parent, (*tar) -> name, len); // add a '/' character to the end if ((len < 99) && ((*tar) -> name[len - 1] != '/')){ (*tar) -> name[len] = '/'; (*tar) -> name[len + 1] = '\0'; calculate_checksum((*tar)); } V_PRINT(stdout, "%s\n", (*tar) -> name); // write metadata to (*tar) file if (write_size(fd, (*tar) -> block, 512) != 512){ WRITE_ERROR(stderr, "Error: Failed to write metadata to archive\n"); } // go through directory DIR * d = opendir(parent); if (!d){ WRITE_ERROR(stderr, "Error: Cannot read directory %s\n", parent); } struct dirent * dir; while ((dir = readdir(d))){ // if not special directories . and .. const size_t sublen = strlen(dir -> d_name); if (strncmp(dir -> d_name, ".", sublen) && strncmp(dir -> d_name, "..", sublen)){ char * path = calloc(len + sublen + 2, sizeof(char)); sprintf(path, "%s/%s", parent, dir -> d_name); // recursively write each subdirectory if (write_entries(fd, &((*tar) -> next), head, 1, (const char **) &path, offset, verbosity) < 0){ WRITE_ERROR(stderr, "Error: Recurse error\n"); } // go to end of new data while ((*tar) -> next){ tar = &((*tar) -> next); } free(path); } } free(parent); closedir(d); } else{ // if (((*tar) -> type == REGULAR) || ((*tar) -> type == NORMAL) || ((*tar) -> type == CONTIGUOUS) || ((*tar) -> type == SYMLINK) || ((*tar) -> type == CHAR) || ((*tar) -> type == BLOCK) || ((*tar) -> type == FIFO)){ V_PRINT(stdout, "%s\n", (*tar) -> name); char tarred = 0; // whether or not the file has already been put into the archive if (((*tar) -> type == REGULAR) || ((*tar) -> type == NORMAL) || ((*tar) -> type == CONTIGUOUS) || ((*tar) -> type == SYMLINK)){ struct tar_t * found = exists(*head, files[i], 1); tarred = (found != (*tar)); // if file has already been included, modify the header if (tarred){ // change type to hard link (*tar) -> type = HARDLINK; // change link name to (*tar)red file name (both are the same) strncpy((*tar) -> link_name, (*tar) -> name, 100); // change size to 0 strncpy((*tar) -> size, "00000000000", 11); // recalculate checksum calculate_checksum((*tar)); } } // write metadata to (*tar) file if (write_size(fd, (*tar) -> block, 512) != 512){ WRITE_ERROR(stderr, "Error: Failed to write metadata to archive\n"); } if (((*tar) -> type == REGULAR) || ((*tar) -> type == NORMAL) || ((*tar) -> type == CONTIGUOUS)){ // if the file isn't already in the tar file, copy the contents in if (!tarred){ int f = open((*tar) -> name, O_RDONLY); if (f < 0){ WRITE_ERROR(stderr, "Error: Could not open %s\n", files[i]); } int r = 0; while ((r = read_size(f, buf, 512)) > 0){ if (write_size(fd, buf, r) != r){ RC_ERROR(stderr, "Error: Could not write to archive: %s\n", 
strerror(rc)); } } close(f); } } // pad data to fill block const unsigned int size = oct2uint((*tar) -> size, 11); const unsigned int pad = 512 - size % 512; if (pad != 512){ for(unsigned int j = 0; j < pad; j++){ if (write_size(fd, "\0", 1) != 1){ WRITE_ERROR(stderr, "Error: Could not write padding data\n"); } } *offset += pad; } *offset += size; tar = &((*tar) -> next); } // add metadata size *offset += 512; } return 0; }
int extract_entry(const int fd, struct tar_t * entry, const char verbosity){ V_PRINT(stdout, "%s\n", entry -> name); if ((entry -> type == REGULAR) || (entry -> type == NORMAL) || (entry -> type == CONTIGUOUS)){ // create intermediate directories size_t len = strlen(entry -> name); char * path = calloc(len + 1, sizeof(char)); strncpy(path, entry -> name, len); // remove file from path while (--len && (path[len] != '/')); path[len] = '\0'; // if nothing was found, path is terminated if (recursive_mkdir(path, DEFAULT_DIR_MODE, verbosity) < 0){ V_PRINT(stderr, "Error: Could not make directory %s\n", path); free(path); return -1; } free(path); if ((entry -> type == REGULAR) || (entry -> type == NORMAL) || (entry -> type == CONTIGUOUS)){ // create file const unsigned int size = oct2uint(entry -> size, 11); int f = open(entry -> name, O_WRONLY | O_CREAT | O_TRUNC, oct2uint(entry -> mode, 7) & 0777); if (f < 0){ RC_ERROR(stderr, "Error: Unable to open file %s: %s\n", entry -> name, strerror(rc)); } // move archive pointer to data location if (lseek(fd, 512 + entry -> begin, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Bad index: %s\n", strerror(rc)); } // copy data to file char buf[512]; int got = 0; while (got < size){ int r; if ((r = read_size(fd, buf, MIN(size - got, 512))) < 0){ EXIST_ERROR(stderr, "Error: Unable to read from archive: %s\n", strerror(rc)); } if (write(f, buf, r) != r){ EXIST_ERROR(stderr, "Error: Unable to write to %s: %s\n", entry -> name, strerror(rc)); } got += r; } close(f); } else if ((entry -> type == CHAR) || (entry -> type == BLOCK)){ if (mknod(entry -> name, oct2uint(entry -> mode, 7), (oct2uint(entry -> major, 7) << 20) | oct2uint(entry -> minor, 7)) < 0){ EXIST_ERROR(stderr, "Error: Unable to make device %s: %s\n", entry -> name, strerror(rc)); } } } else if (entry -> type == HARDLINK){ if (link(entry -> link_name, entry -> name) < 0){ EXIST_ERROR(stderr, "Error: Unable to create hardlink %s: %s\n", entry -> name, strerror(rc)); } } else if (entry -> type == SYMLINK){ if (symlink(entry -> link_name, entry -> name) < 0){ EXIST_ERROR(stderr, "Error: Unable to make symlink %s: %s\n", entry -> name, strerror(rc)); } } else if (entry -> type == CHAR){ if (mknod(entry -> name, S_IFCHR | (oct2uint(entry -> mode, 7) & 0777), (oct2uint(entry -> major, 7) << 20) | oct2uint(entry -> minor, 7)) < 0){ EXIST_ERROR(stderr, "Error: Unable to create directory %s: %s\n", entry -> name, strerror(rc)); } } else if (entry -> type == BLOCK){ if (mknod(entry -> name, S_IFBLK | (oct2uint(entry -> mode, 7) & 0777), (oct2uint(entry -> major, 7) << 20) | oct2uint(entry -> minor, 7)) < 0){ EXIST_ERROR(stderr, "Error: Unable to create directory %s: %s\n", entry -> name, strerror(rc)); } } else if (entry -> type == DIRECTORY){ if (recursive_mkdir(entry -> name, oct2uint(entry -> mode, 7) & 0777, verbosity) < 0){ EXIST_ERROR(stderr, "Error: Unable to create directory %s: %s\n", entry -> name, strerror(rc)); } } else if (entry -> type == FIFO){ if (mkfifo(entry -> name, oct2uint(entry -> mode, 7) & 0777) < 0){ EXIST_ERROR(stderr, "Error: Unable to make pipe %s: %s\n", entry -> name, strerror(rc)); } } return 0; }
int tar_remove(const int fd, struct tar_t ** archive, const size_t filecount, const char * files[], const char verbosity){ if (fd < 0){ return -1; } // archive has to exist if (!archive || !*archive){ return -1; } if (filecount && !files){ return -1; } if (!filecount){ return 0; } // get file permissions struct stat st; if (fstat(fd, &st)){ RC_ERROR(stderr, "Error: Unable to stat archive: %s\n", strerror(rc)); } // reset offset of original file if (lseek(fd, 0, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Unable to seek file: %s\n", strerror(rc)); } // find first file to be removed that does not exist int ret = 0; char * bad = calloc(filecount, sizeof(char)); for(int i = 0; i < filecount; i++){ if (!exists(*archive, files[i], 0)){ V_PRINT(stderr, "Error: %s not found in archive\n", files[i]); bad[i] = 1; ret = -1; } } unsigned int read_offset = 0; unsigned int write_offset = 0; struct tar_t * prev = NULL; struct tar_t * curr = *archive; while(curr){ // get original size int total = 512; if ((curr -> type == REGULAR) || (curr -> type == NORMAL) || (curr -> type == CONTIGUOUS)){ total += oct2uint(curr -> size, 11); if (total % 512){ total += 512 - (total % 512); } } const int match = check_match(curr, filecount, bad, files); if (match < 0){ V_PRINT(stderr, "Error: Match failed\n"); return -1; } else if (!match){ // if the old data is not in the right place, move it if (write_offset < read_offset){ int got = 0; while (got < total){ // go to old data if (lseek(fd, read_offset, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Cannot seek: %s\n", strerror(rc)); } char buf[512]; // copy chunk out if (read_size(fd, buf, 512) != 512){// guarenteed 512 octets V_PRINT(stderr, "Error: Read error\n"); return -1; } // go to new position if (lseek(fd, write_offset, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Cannot seek: %s\n", strerror(rc)); } // write data in if (write_size(fd, buf, 512) != 512){ V_PRINT(stderr, "Error: Write error\n"); return -1; } // increment offsets got += 512; read_offset += 512; write_offset += 512; } } else{ read_offset += total; write_offset += total; // skip past data if (lseek(fd, read_offset, SEEK_SET) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Cannot seek: %s\n", strerror(rc)); } } prev = curr; curr = curr -> next; } else{// if name matches, skip the data struct tar_t * tmp = curr; if (!prev){ *archive = curr -> next; if (*archive){ (*archive) -> begin = 0; } } else{ prev -> next = curr -> next; if (prev -> next){ prev -> next -> begin = curr -> begin; } } curr = curr -> next; free(tmp); // next read starts after current entry read_offset += total; } } // resize file if (ftruncate(fd, write_offset) < 0){ RC_ERROR(stderr, "Error: Could not truncate file: %s\n", strerror(rc)); } // add end data if (write_end_data(fd, write_offset, verbosity) < 0){ V_PRINT(stderr, "Error: Could not close file\n"); } return ret; }
static void python_process_event(int cpu, void *data, int size __unused, unsigned long long nsecs, char *comm) { PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; static char handler_name[256]; struct format_field *field; unsigned long long val; unsigned long s, ns; struct event *event; unsigned n = 0; int type; int pid; t = PyTuple_New(MAX_FIELDS); if (!t) Py_FatalError("couldn't create Python tuple"); type = trace_parse_common_type(data); event = find_cache_event(type); if (!event) die("ug! no event found for type %d", type); pid = trace_parse_common_pid(data); sprintf(handler_name, "%s__%s", event->system, event->name); handler = PyDict_GetItemString(main_dict, handler_name); if (handler && !PyCallable_Check(handler)) handler = NULL; if (!handler) { dict = PyDict_New(); if (!dict) Py_FatalError("couldn't create Python dict"); } s = nsecs / NSECS_PER_SEC; ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; context = PyCObject_FromVoidPtr(scripting_context, NULL); PyTuple_SetItem(t, n++, PyString_FromString(handler_name)); PyTuple_SetItem(t, n++, PyCObject_FromVoidPtr(scripting_context, NULL)); if (handler) { PyTuple_SetItem(t, n++, PyInt_FromLong(cpu)); PyTuple_SetItem(t, n++, PyInt_FromLong(s)); PyTuple_SetItem(t, n++, PyInt_FromLong(ns)); PyTuple_SetItem(t, n++, PyInt_FromLong(pid)); PyTuple_SetItem(t, n++, PyString_FromString(comm)); } else { PyDict_SetItemString(dict, "common_cpu", PyInt_FromLong(cpu)); PyDict_SetItemString(dict, "common_s", PyInt_FromLong(s)); PyDict_SetItemString(dict, "common_ns", PyInt_FromLong(ns)); PyDict_SetItemString(dict, "common_pid", PyInt_FromLong(pid)); PyDict_SetItemString(dict, "common_comm", PyString_FromString(comm)); } for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { int offset; if (field->flags & FIELD_IS_DYNAMIC) { offset = *(int *)(data + field->offset); offset &= 0xffff; } else offset = field->offset; obj = PyString_FromString((char *)data + offset); } else { /* FIELD_IS_NUMERIC */ val = read_size(data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { if ((long long)val >= LONG_MIN && (long long)val <= LONG_MAX) obj = PyInt_FromLong(val); else obj = PyLong_FromLongLong(val); } else { if (val <= LONG_MAX) obj = PyInt_FromLong(val); else obj = PyLong_FromUnsignedLongLong(val); } } if (handler) PyTuple_SetItem(t, n++, obj); else PyDict_SetItemString(dict, field->name, obj); } if (!handler) PyTuple_SetItem(t, n++, dict); if (_PyTuple_Resize(&t, n) == -1) Py_FatalError("error resizing Python tuple"); if (handler) { retval = PyObject_CallObject(handler, t); if (retval == NULL) handler_call_die(handler_name); } else { handler = PyDict_GetItemString(main_dict, "trace_unhandled"); if (handler && PyCallable_Check(handler)) { retval = PyObject_CallObject(handler, t); if (retval == NULL) handler_call_die("trace_unhandled"); } Py_DECREF(dict); } Py_DECREF(t); }
int quicktime_atom_read_header(quicktime_t *file, quicktime_atom_t *atom) { int result = 0; char header[10]; if(file->use_avi) { reset(atom); atom->start = quicktime_position(file); if(!quicktime_read_data(file, header, HEADER_LENGTH)) return 1; atom->type[0] = header[0]; atom->type[1] = header[1]; atom->type[2] = header[2]; atom->type[3] = header[3]; atom->type[4] = 0; atom->size = (((unsigned char)header[4]) ) | (((unsigned char)header[5]) << 8 ) | (((unsigned char)header[6]) << 16) | (((unsigned char)header[7]) << 24); atom->end = quicktime_add3(atom->start, atom->size, 8); } else { int64_t size2; reset(atom); atom->start = quicktime_position(file); if(!quicktime_read_data(file, header, HEADER_LENGTH)) return 1; result = read_type(header, atom->type); atom->size = read_size(header); atom->end = atom->start + atom->size; /* * printf("quicktime_atom_read_header 1 %c%c%c%c start %llx size %llx end %llx ftell %llx %llx\n", * atom->type[0], atom->type[1], atom->type[2], atom->type[3], * atom->start, atom->size, atom->end, * file->file_position, * (int64_t)FTELL(file->stream)); */ /* Skip placeholder atom */ if(quicktime_match_32(atom->type, "wide")) { atom->start = quicktime_position(file); reset(atom); if(!quicktime_read_data(file, header, HEADER_LENGTH)) return 1; result = read_type(header, atom->type); atom->size -= 8; if(atom->size <= 0) { /* Wrapper ended. Get new atom size */ atom->size = read_size(header); } atom->end = atom->start + atom->size; } else /* Get extended size */ if(atom->size == 1) { if(!quicktime_read_data(file, header, HEADER_LENGTH)) return 1; atom->size = read_size64(header); atom->end = atom->start + atom->size; /* * printf("quicktime_atom_read_header 2 %c%c%c%c start %llx size %llx end %llx ftell %llx\n", * atom->type[0], atom->type[1], atom->type[2], atom->type[3], * atom->start, atom->size, atom->end, * file->file_position); */ } } return result; }
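The atom reader above decodes the compact size with read_size, a 4-byte big-endian read of the first header bytes (the same decoding sketched earlier for the trailer readers), and falls back to read_size64 when the compact field holds the sentinel value 1. A minimal sketch of the extended-size read, with the layout assumed from the atom format:

#include <stdint.h>

/* Extended atom size: eight big-endian bytes that follow the 8-byte
   header when the 32-bit size field equals 1 (assumed layout). */
static int64_t read_size64(const unsigned char *data)
{
    int64_t result = 0;
    int i;
    for (i = 0; i < 8; i++)
        result = (result << 8) | (int64_t)data[i];
    return result;
}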
int tar_read(const int fd, struct tar_t ** archive, const char verbosity){ if (fd < 0){ return -1; } if (!archive || *archive){ return -1; } unsigned int offset = 0; int count = 0; struct tar_t ** tar = archive; char update = 1; for(count = 0; ; count++){ *tar = malloc(sizeof(struct tar_t)); if (update && (read_size(fd, (*tar) -> block, 512) != 512)){ V_PRINT(stderr, "Error: Bad read. Stopping\n"); tar_free(*tar); *tar = NULL; break; } update = 1; // if current block is all zeros if (iszeroed((*tar) -> block, 512)){ if (read_size(fd, (*tar) -> block, 512) != 512){ V_PRINT(stderr, "Error: Bad read. Stopping\n"); tar_free(*tar); *tar = NULL; break; } // check if next block is all zeros as well if (iszeroed((*tar) -> block, 512)){ tar_free(*tar); *tar = NULL; // skip to end of record if (lseek(fd, RECORDSIZE - (offset % RECORDSIZE), SEEK_CUR) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Unable to seek file: %s\n", strerror(rc)); } break; } update = 0; } // set current entry's file offset (*tar) -> begin = offset; // skip over data and unfilled block unsigned int jump = oct2uint((*tar) -> size, 11); if (jump % 512){ jump += 512 - (jump % 512); } // move file descriptor offset += 512 + jump; if (lseek(fd, jump, SEEK_CUR) == (off_t) (-1)){ RC_ERROR(stderr, "Error: Unable to seek file: %s\n", strerror(rc)); } // ready next value tar = &((*tar) -> next); } return count; }