/// 从 queue 中查询到符合的 uncomplete, 同时将查询到的 uncomplete 从 queue.hash 中删除 static struct uncomplete * find_uncomplete(struct queue *q, int fd) { if (q == NULL) return NULL; int h = hash_fd(fd); struct uncomplete * uc = q->hash[h]; if (uc == NULL) return NULL; // 返回查询到的元素 if (uc->pack.id == fd) { q->hash[h] = uc->next; return uc; } struct uncomplete * last = uc; while (last->next) { uc = last->next; if (uc->pack.id == fd) { last->next = uc->next; return uc; } last = uc; } return NULL; }
/*
 * Open the file at `path` read-only and feed the descriptor to hash_fd()
 * for object hashing; dies (with errno detail) if the file cannot be opened.
 */
static void hash_object(const char *path, const char *type, const char *vpath,
			unsigned flags, int literally)
{
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		die_errno("Cannot open '%s'", path);
	hash_fd(fd, type, vpath, flags, literally);
}
/*
 * Open the file at `path` read-only and feed the descriptor to hash_fd()
 * for object hashing; dies if the file cannot be opened.
 */
static void hash_object(const char *path, const char *type, int write_object,
			const char *vpath)
{
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		die("Cannot open %s", path);
	hash_fd(fd, type, write_object, vpath);
}
/*
 * Refresh the stat-cache entry for `path` (keyed by `path_hash`) and report
 * the file's content hash through `hash`.  When `do_hash` is false, only the
 * stat details are recorded and `hash` is filled with all-one bytes (used
 * here as the "no hash computed" marker).  A nonexistent path reports an
 * all-zero hash instead.
 */
void stat_cache_update(struct hash *hash, const char *path,
                       const struct hash *path_hash, int do_hash)
{
	initialize();

	// lstat the file
	struct stat st;
	if (real_lstat(path, &st) < 0) {
		int errno_ = errno;  // save errno before any later call can clobber it
		if (errno_ == ENOENT || errno_ == ENOTDIR) {
			// Set hash to zero to represent nonexistent file
			memset(hash, 0, sizeof(struct hash));
			return;
		}
		die("lstat(\"%s\") failed: %s", path, strerror(errno_));
	}

	// TODO: We currently ignore the S_ISLNK flag, which assumes that traced
	// processes never detect symlinks via lstat and never create them.

	// For now we go the simple route and hold the stat_cache lock for the
	// entire duration of the hash computation. In future we may want to drop
	// the lock while we compute the hash. Alternatively, switching to a finer
	// grain locking discipline might avoid the problem.
	shared_map_lock(&stat_cache);

	// Lookup entry, creating it if necessary, and check if it's up to date.
	// An all-one contents_hash means "never computed", so a do_hash caller
	// must treat such an entry as stale even if the stat details match.
	struct stat_cache_entry *entry;
	if (!shared_map_lookup(&stat_cache, path_hash, (void**)&entry, 1)
	    || entry->st_mtimespec.tv_nsec != st.st_mtimespec.tv_nsec
	    || entry->st_mtimespec.tv_sec != st.st_mtimespec.tv_sec
	    || entry->st_size != st.st_size
	    || entry->st_ino != st.st_ino
	    || (do_hash && hash_is_all_one(&entry->contents_hash))) {
		// Entry is new or out of date. In either case, compute hash and
		// record new stat details.
		entry->st_ino = st.st_ino;
		entry->st_mtimespec = st.st_mtimespec;
		entry->st_size = st.st_size;
		if (do_hash) {
			// Hash the file
			int fd = real_open(path, O_RDONLY, 0);
			if (fd < 0)
				die("can't open '%s' to compute hash", path);
			hash_fd(&entry->contents_hash, fd);
			real_close(fd);
		} else
			// Mark the hash as "not computed" (all-one bytes).
			memset(&entry->contents_hash, -1, sizeof(struct hash));
	}
	shared_map_unlock(&stat_cache);

	// NOTE(review): `entry` is dereferenced after the lock is released —
	// this assumes map entries are never moved or evicted; TODO confirm.
	if (do_hash)
		*hash = entry->contents_hash;
	else
		memset(hash, -1, sizeof(struct hash));
}
/* Create an empty uncomplete-packet record for socket `fd` and push it onto
 * the head of the matching hash bucket of the packet queue found at Lua
 * stack slot 1.  The record's contents are zeroed; the caller fills in the
 * packet data afterwards.
 *
 * Params: L is the Lua state; fd is the socket id.
 * Returns: the freshly allocated uncomplete record.
 */
static struct uncomplete *
save_uncomplete(lua_State *L, int fd) {
	struct queue *q = get_queue(L);
	struct uncomplete *uc = skynet_malloc(sizeof(*uc));
	int bucket = hash_fd(fd);

	memset(uc, 0, sizeof(*uc));
	uc->pack.id = fd;
	// Link at the head of the bucket chain.
	uc->next = q->hash[bucket];
	q->hash[bucket] = uc;
	return uc;
}
/* Hash the contents of `fname` into `hash`.  Returns false (after logging
 * the reason) when the file cannot be opened or hashing fails. */
bool hash_file(struct hash *hash, const char *fname)
{
	bool ok;
	int fd = open(fname, O_RDONLY|O_BINARY);

	if (fd == -1) {
		cc_log("Failed to open %s: %s", fname, strerror(errno));
		return false;
	}
	ok = hash_fd(hash, fd);
	close(fd);
	return ok;
}
/* Return the fde_t registered for descriptor `fd`, or NULL when the
 * descriptor is not in the hash table. */
fde_t *
lookup_fd(int fd)
{
	fde_t *F;

	for (F = fd_hash[hash_fd(fd)]; F != NULL; F = F->hnext)
		if (F->fd == fd)
			return F;

	return NULL;
}
static uint8_t *hash_file(const char *filename, uint8_t hash_algo) { int src_fd = strcmp(filename, "-") == 0 ? STDIN_FILENO : open(filename, O_RDONLY); if (src_fd == -1) { bb_perror_msg("%s", filename); return NULL; } else { uint8_t *hash_value; RESERVE_CONFIG_UBUFFER(hash_value_bin, 20); hash_value = hash_fd(src_fd, -1, hash_algo, hash_value_bin) != -2 ? hash_bin_to_hex(hash_value_bin, hash_algo == HASH_MD5 ? 16 : 20) : NULL; RELEASE_CONFIG_BUFFER(hash_value_bin); close(src_fd); return hash_value; } }
/* Called to open a given filedescriptor: initialize the fde_t for `fd`,
 * record an optional description, and register it in the fd hash table. */
void
fd_open(fde_t *F, int fd, int is_socket, const char *desc)
{
	unsigned int bucket = hash_fd(fd);

	assert(fd >= 0);

	F->fd = fd;
	F->comm_index = -1;
	if (desc != NULL)
		strlcpy(F->desc, desc, sizeof(F->desc));

	/* Note: normally we'd have to clear the other flags,
	 * but currently F is always cleared before calling us.. */
	F->flags.open = 1;
	F->flags.is_socket = is_socket;

	/* Push onto the head of the hash chain for this descriptor. */
	F->hnext = fd_hash[bucket];
	fd_hash[bucket] = F;

	number_fd++;
}
/*
 * Register the replica descriptor array `fd` in the global fd table.
 * The first enabled replica with a valid (>= 0) descriptor seeds the hash
 * used to pick a free slot.  Returns the slot index used, or -1 when every
 * replica is disabled/invalid or no free slot was found.
 *
 * NOTE(review): the `fd` pointer itself (the whole per-replica array) is
 * stored in the table — confirm config.tab_fd.fd holds int* elements.
 */
int fd_hashset(int *fd)
{
	int i, fd_ndx = -1;

	// Find the first usable replica descriptor; its value seeds the hash.
	for (i = 0; i < config.max_replica; ++i) {
		if ((!config.replicas[i].disabled) && (fd[i] >= 0)) {
			dbg("trying %d %d\n", i, fd[i]);
			fd_ndx = fd_hashseekfree(hash_fd(fd[i]));
			break;
		}
	}
	if (i == config.max_replica) {
		// No enabled replica had a valid descriptor.
		return (-1);
	}
	// If the hash address is not used then use it
	if (fd_ndx >= 0) {
		config.tab_fd.fd[fd_ndx] = fd;
	}
	return (fd_ndx);
}
/*
 * Refresh the stat-cache entry (keyed by `path_hash`) for the already-open
 * descriptor `fd` and report the file's content hash through `hash`.
 * The hash is recomputed whenever the cached stat details are stale or the
 * cached hash is the all-one "not computed" marker.
 */
void stat_cache_update_fd(struct hash *hash, int fd, const struct hash *path_hash)
{
	initialize();

	// lstat the file
	struct stat st;
	if (real_fstat(fd, &st) < 0)
		die("fstat(%d) failed: %s", fd, strerror(errno));

	// For now we go the simple route and hold the stat_cache lock for the
	// entire duration of the hash computation. In future we may want to drop
	// the lock while we compute the hash. Alternatively, switching to a finer
	// grain locking discipline might avoid the problem.
	shared_map_lock(&stat_cache);

	// Lookup entry, creating it if necessary, and check if it's up to date
	struct stat_cache_entry *entry;
	if (!shared_map_lookup(&stat_cache, path_hash, (void**)&entry, 1)
	    || entry->st_mtimespec.tv_nsec != st.st_mtimespec.tv_nsec
	    || entry->st_mtimespec.tv_sec != st.st_mtimespec.tv_sec
	    || entry->st_size != st.st_size
	    || entry->st_ino != st.st_ino
	    || hash_is_all_one(&entry->contents_hash)) {
		// Entry is new or out of date. In either case, compute hash and
		// record new stat details.
		entry->st_ino = st.st_ino;
		entry->st_mtimespec = st.st_mtimespec;
		entry->st_size = st.st_size;

		// Hash the file; rewind first since the descriptor may have
		// been read from already.
		if (lseek(fd, 0, SEEK_SET) < 0)
			die("lseek failed: %s", strerror(errno));
		hash_fd(&entry->contents_hash, fd);
	}
	shared_map_unlock(&stat_cache);

	// NOTE(review): `entry` is read after the lock is released — assumes
	// map entries are never moved or evicted; TODO confirm.
	*hash = entry->contents_hash;
}
/* Called to close a given filedescriptor: cancel pending event-loop and
 * resolver interest, unlink F from the fd hash table, close the OS
 * descriptor, and zero the fde_t for reuse. */
void
fd_close(fde_t *F)
{
	unsigned int hashv = hash_fd(F->fd);

	// If the event loop is currently pointed at this entry, advance it so
	// the loop never walks freed/zeroed memory.
	if (F == fd_next_in_loop)
		fd_next_in_loop = F->hnext;

	// Cancel read/write interest before the descriptor goes away.
	if (F->flags.is_socket)
		comm_setselect(F, COMM_SELECT_WRITE | COMM_SELECT_READ, NULL, NULL, 0);

	delete_resolver_queries(F);

#ifdef HAVE_LIBCRYPTO
	if (F->ssl)
		SSL_free(F->ssl);
#endif

	// Unlink F from its hash chain (head or interior).
	if (fd_hash[hashv] == F)
		fd_hash[hashv] = F->hnext;
	else {
		fde_t *prev;

		/* let it core if not found */
		for (prev = fd_hash[hashv]; prev->hnext != F; prev = prev->hnext)
			;
		prev->hnext = F->hnext;
	}

	/* Unlike squid, we're actually closing the FD here! -- adrian */
	close(F->fd);
	number_fd--;

	memset(F, 0, sizeof(fde_t));
}
/* [lua_api] Return data received on a socket to the Lua layer.  First check
 * queue.hash for an uncomplete packet belonging to socket `fd`; if one
 * exists, copy as much of `buffer` into it as it still needs.  When that
 * completes exactly one packet, return "data", the socket id, the packet
 * pointer and its size to Lua; when more than one packet's worth of bytes
 * is available, push all complete packets onto the queue and return "more"
 * (drained via lpop).  Less than a full packet is stashed back into the
 * hash table.  The same flow applies when no uncomplete packet existed.
 * Data is copied out of `buffer`, so the caller must free `buffer` after
 * this call returns.
 *
 * Params: L is the Lua state (queue userdata at stack slot 1); fd is the
 * socket id; buffer/size describe the received bytes.
 * Lua returns: [1] queue userdata; [2] "more"/"data", or nothing extra when
 * the data is still incomplete; [3] socket id ("data" only); [4] packet
 * pointer ("data" only); [5] packet size ("data" only).
 */
static int
filter_data_(lua_State *L, int fd, uint8_t * buffer, int size) {
	struct queue *q = lua_touserdata(L,1);
	// find_uncomplete also unlinks the record from queue.hash.
	struct uncomplete * uc = find_uncomplete(q, fd);
	if (uc) {
		// fill uncomplete
		if (uc->read < 0) {
			// read size: the first header byte was stashed in uc->header,
			// this byte completes the 2-byte big-endian packet size.
			assert(uc->read == -1);
			int pack_size = *buffer;
			pack_size |= uc->header << 8 ;
			++buffer;	// step past the size byte
			--size;		// payload bytes remaining in buffer
			uc->pack.size = pack_size;
			uc->pack.buffer = skynet_malloc(pack_size);
			uc->read = 0;	// no payload consumed yet
		}
		// Bytes still needed to complete the pending packet.
		int need = uc->pack.size - uc->read;
		if (size < need) {
			// Not enough yet: absorb everything and re-insert the
			// record into queue.hash.
			memcpy(uc->pack.buffer + uc->read, buffer, size);
			uc->read += size;
			int h = hash_fd(fd);
			uc->next = q->hash[h];
			q->hash[h] = uc;
			return 1;
		}
		// Complete the pending packet.
		memcpy(uc->pack.buffer + uc->read, buffer, need);
		buffer += need;
		size -= need;
		if (size == 0) {
			// Buffer held exactly one packet: hand it to Lua directly.
			lua_pushvalue(L, lua_upvalueindex(TYPE_DATA));
			lua_pushinteger(L, fd);
			lua_pushlightuserdata(L, uc->pack.buffer);
			lua_pushinteger(L, uc->pack.size);
			skynet_free(uc);
			return 5;
		}
		// more data: queue this packet plus everything after it and
		// report "more" so Lua drains the queue with lpop.
		push_data(L, fd, uc->pack.buffer, uc->pack.size, 0);
		skynet_free(uc);
		push_more(L, fd, buffer, size);
		lua_pushvalue(L, lua_upvalueindex(TYPE_MORE));
		return 2;
	} else {
		if (size == 1) {
			// Only the first header byte arrived: stash it.
			struct uncomplete * uc = save_uncomplete(L, fd);
			uc->read = -1;
			uc->header = *buffer;
			return 1;
		}
		// Read the 2-byte big-endian packet size.
		int pack_size = read_size(buffer);
		buffer+=2;
		size-=2;
		if (size < pack_size) {
			// Not a full packet: save what we have in a new record.
			struct uncomplete * uc = save_uncomplete(L, fd);
			uc->read = size;
			uc->pack.size = pack_size;
			uc->pack.buffer = skynet_malloc(pack_size);
			memcpy(uc->pack.buffer, buffer, size);
			return 1;
		}
		if (size == pack_size) {
			// just one package
			lua_pushvalue(L, lua_upvalueindex(TYPE_DATA));
			lua_pushinteger(L, fd);
			void * result = skynet_malloc(pack_size);
			memcpy(result, buffer, size);
			lua_pushlightuserdata(L, result);
			lua_pushinteger(L, size);
			return 5;
		}
		// more data: queue every complete packet and report "more".
		push_data(L, fd, buffer, pack_size, 1);
		buffer += pack_size;
		size -= pack_size;
		push_more(L, fd, buffer, size);
		lua_pushvalue(L, lua_upvalueindex(TYPE_MORE));
		return 2;
	}
}
int main(int argc, const char **argv) { int i; const char *prefix = NULL; int prefix_length = -1; const char *errstr = NULL; type = blob_type; git_extract_argv0_path(argv[0]); git_config(git_default_config, NULL); argc = parse_options(argc, argv, hash_object_options, hash_object_usage, 0); if (write_object) { prefix = setup_git_directory(); prefix_length = prefix ? strlen(prefix) : 0; if (vpath && prefix) vpath = prefix_filename(prefix, prefix_length, vpath); } if (stdin_paths) { if (hashstdin) errstr = "Can't use --stdin-paths with --stdin"; else if (argc) errstr = "Can't specify files with --stdin-paths"; else if (vpath) errstr = "Can't use --stdin-paths with --path"; else if (no_filters) errstr = "Can't use --stdin-paths with --no-filters"; } else { if (hashstdin > 1) errstr = "Multiple --stdin arguments are not supported"; if (vpath && no_filters) errstr = "Can't use --path with --no-filters"; } if (errstr) { error("%s", errstr); usage_with_options(hash_object_usage, hash_object_options); } if (hashstdin) hash_fd(0, type, write_object, vpath); for (i = 0 ; i < argc; i++) { const char *arg = argv[i]; if (0 <= prefix_length) arg = prefix_filename(prefix, prefix_length, arg); hash_object(arg, type, write_object, no_filters ? NULL : vpath ? vpath : arg); } if (stdin_paths) hash_stdin_paths(type, write_object); return 0; }
/*
 * Run `command` (with any "%compiler%" placeholder replaced by `compiler`),
 * capture its combined stdout+stderr, and fold that output into `hash`.
 * Returns true on success; a spawn failure, hashing failure, or nonzero
 * exit status bumps the STATS_COMPCHECK counter and returns false.
 */
bool hash_command_output(struct mdfour *hash, const char *command,
                         const char *compiler)
{
#ifdef _WIN32
	SECURITY_ATTRIBUTES sa = { sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
	HANDLE pipe_out[2];
	PROCESS_INFORMATION pi;
	STARTUPINFO si;
	DWORD exitcode;
	bool cmd = false;
	char *sh = NULL;
	char *win32args;
	char *path;
	BOOL ret;
	bool ok;
	int fd;
#else
	pid_t pid;
	int pipefd[2];
#endif

#ifdef _WIN32
	/* trim leading space */
	while (isspace(*command)) {
		command++;
	}
	/* add "echo" command: Windows has no /bin/echo, so route echo-style
	 * checks through cmd.exe instead. */
	if (str_startswith(command, "echo")) {
		command = format("cmd.exe /c \"%s\"", command);
		cmd = true;
	} else if (str_startswith(command, "%compiler%")
	           && str_eq(compiler, "echo")) {
		command = format("cmd.exe /c \"%s%s\"", compiler, command + 10);
		cmd = true;
	} else {
		command = x_strdup(command);
	}
#endif

	// Substitute the real compiler path for every "%compiler%" argument.
	struct args *args = args_init_from_string(command);
	int i;
	for (i = 0; i < args->argc; i++) {
		if (str_eq(args->argv[i], "%compiler%")) {
			args_set(args, i, compiler);
		}
	}
	cc_log_argv("Executing compiler check command ", args->argv);

#ifdef _WIN32
	memset(&pi, 0x00, sizeof(pi));
	memset(&si, 0x00, sizeof(si));

	path = find_executable(args->argv[0], NULL);
	if (!path) {
		path = args->argv[0];
	}
	sh = win32getshell(path);
	if (sh) {
		path = sh;
	}

	si.cb = sizeof(STARTUPINFO);
	// The child inherits the write end as stdout/stderr; the read end is
	// made non-inheritable so the parent sees EOF when the child exits.
	CreatePipe(&pipe_out[0], &pipe_out[1], &sa, 0);
	SetHandleInformation(pipe_out[0], HANDLE_FLAG_INHERIT, 0);
	si.hStdOutput = pipe_out[1];
	si.hStdError = pipe_out[1];
	si.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
	si.dwFlags = STARTF_USESTDHANDLES;

	if (!cmd) {
		win32args = win32argvtos(sh, args->argv);
	} else {
		win32args = (char *) command;  /* quoted */
	}
	ret = CreateProcess(path, win32args, NULL, NULL, 1, 0, NULL, NULL, &si, &pi);
	CloseHandle(pipe_out[1]);
	args_free(args);
	free(win32args);
	if (cmd) {
		free((char *) command);  /* original argument was replaced above */
	}
	if (ret == 0) {
		stats_update(STATS_COMPCHECK);
		return false;
	}
	fd = _open_osfhandle((intptr_t) pipe_out[0], O_BINARY);
	ok = hash_fd(hash, fd);
	if (!ok) {
		cc_log("Error hashing compiler check command output: %s",
		       strerror(errno));
		stats_update(STATS_COMPCHECK);
	}
	WaitForSingleObject(pi.hProcess, INFINITE);
	GetExitCodeProcess(pi.hProcess, &exitcode);
	CloseHandle(pipe_out[0]);
	CloseHandle(pi.hProcess);
	CloseHandle(pi.hThread);
	if (exitcode != 0) {
		cc_log("Compiler check command returned %d", (int) exitcode);
		stats_update(STATS_COMPCHECK);
		return false;
	}
	return ok;
#else
	if (pipe(pipefd) == -1) {
		fatal("pipe failed");
	}
	pid = fork();
	if (pid == -1) {
		fatal("fork failed");
	}
	if (pid == 0) {
		/* Child. */
		close(pipefd[0]);
		close(0);
		// Redirect both stdout and stderr into the pipe.
		dup2(pipefd[1], 1);
		dup2(pipefd[1], 2);
		_exit(execvp(args->argv[0], args->argv));
		return false; /* Never reached. */
	} else {
		/* Parent. */
		int status;
		bool ok;
		args_free(args);
		// Close our copy of the write end so hash_fd sees EOF when the
		// child exits.
		close(pipefd[1]);
		ok = hash_fd(hash, pipefd[0]);
		if (!ok) {
			cc_log("Error hashing compiler check command output: %s",
			       strerror(errno));
			stats_update(STATS_COMPCHECK);
		}
		close(pipefd[0]);
		if (waitpid(pid, &status, 0) != pid) {
			cc_log("waitpid failed");
			return false;
		}
		if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
			cc_log("Compiler check command returned %d", WEXITSTATUS(status));
			stats_update(STATS_COMPCHECK);
			return false;
		}
		return ok;
	}
#endif
}
/*
 * Entry point for "git hash-object": parse options, validate their
 * combinations, then hash stdin and/or each file argument, optionally
 * reading further paths from stdin (--stdin-paths).
 */
int cmd_hash_object(int argc, const char **argv, const char *prefix)
{
	static const char * const hash_object_usage[] = {
		N_("git hash-object [-t <type>] [-w] [--path=<file> | --no-filters] [--stdin] [--] <file>..."),
		N_("git hash-object --stdin-paths"),
		NULL
	};
	const char *type = blob_type;
	int hashstdin = 0;
	int stdin_paths = 0;
	int no_filters = 0;
	int literally = 0;
	int nongit = 0;
	unsigned flags = HASH_FORMAT_CHECK;
	const char *vpath = NULL;
	const struct option hash_object_options[] = {
		OPT_STRING('t', NULL, &type, N_("type"), N_("object type")),
		OPT_BIT('w', NULL, &flags, N_("write the object into the object database"), HASH_WRITE_OBJECT),
		OPT_COUNTUP( 0 , "stdin", &hashstdin, N_("read the object from stdin")),
		OPT_BOOL( 0 , "stdin-paths", &stdin_paths, N_("read file names from stdin")),
		OPT_BOOL( 0 , "no-filters", &no_filters, N_("store file as is without filters")),
		OPT_BOOL( 0, "literally", &literally, N_("just hash any random garbage to create corrupt objects for debugging Git")),
		OPT_STRING( 0 , "path", &vpath, N_("file"), N_("process file as it were from this path")),
		OPT_END()
	};
	int i;
	const char *errstr = NULL;

	argc = parse_options(argc, argv, NULL, hash_object_options,
	                     hash_object_usage, 0);

	// -w requires a repository; otherwise tolerate running outside one.
	if (flags & HASH_WRITE_OBJECT)
		prefix = setup_git_directory();
	else
		prefix = setup_git_directory_gently(&nongit);

	if (vpath && prefix)
		vpath = xstrdup(prefix_filename(prefix, vpath));

	git_config(git_default_config, NULL);

	// Reject option combinations that make no sense together.
	if (stdin_paths) {
		if (hashstdin)
			errstr = "Can't use --stdin-paths with --stdin";
		else if (argc)
			errstr = "Can't specify files with --stdin-paths";
		else if (vpath)
			errstr = "Can't use --stdin-paths with --path";
	} else {
		if (hashstdin > 1)
			errstr = "Multiple --stdin arguments are not supported";
		if (vpath && no_filters)
			errstr = "Can't use --path with --no-filters";
	}

	if (errstr) {
		error("%s", errstr);
		usage_with_options(hash_object_usage, hash_object_options);
	}

	if (hashstdin)
		hash_fd(0, type, vpath, flags, literally);

	for (i = 0 ; i < argc; i++) {
		const char *arg = argv[i];
		char *to_free = NULL;

		if (prefix)
			arg = to_free = prefix_filename(prefix, arg);
		hash_object(arg, type, no_filters ? NULL : vpath ? vpath : arg,
		            flags, literally);
		free(to_free);
	}

	if (stdin_paths)
		hash_stdin_paths(type, no_filters, flags, literally);

	return 0;
}
/// 从 buffer 读取数据, 返回 lua 函数返回值的个数, 函数的第一个返回值是 queue. static int filter_data_(lua_State *L, int fd, uint8_t * buffer, int size) { struct queue *q = lua_touserdata(L, 1); struct uncomplete * uc = find_uncomplete(q, fd); // 注意, 这里将 uncomplete 从 queue.hash 中移除了 if (uc) { // fill uncomplete // 填充 uncomplete if (uc->read < 0) { // 当包头还未完整读取的情况 // read size assert(uc->read == -1); // 获得数据内容大小 int pack_size = *buffer; pack_size |= uc->header << 8 ; // 偏移到实际数据内容指针开始位置 ++buffer; // 实际的数据内容大小 --size; uc->pack.size = pack_size; // 记录实际需要读取的内容大小 uc->pack.buffer = skynet_malloc(pack_size); // 分配内存空间 uc->read = 0; // 标记还未开始读取数据内容 } // 计算需要读取的数据 int need = uc->pack.size - uc->read; if (size < need) { // 如果 buffer 待读取的数据还不足, 尽可能读取能够读取的数据 // 读取可读的数据 memcpy(uc->pack.buffer + uc->read, buffer, size); uc->read += size; // 再次压入到 queue.hash 中 int h = hash_fd(fd); uc->next = q->hash[h]; q->hash[h] = uc; return 1; } // 读取完整的数据内容 memcpy(uc->pack.buffer + uc->read, buffer, need); // 跳过已经读取的内容 buffer += need; // 计算剩余的可读取数据大小 size -= need; // buffer 中的数据恰好足够读取 if (size == 0) { lua_pushvalue(L, lua_upvalueindex(TYPE_DATA)); // macro TYPE_DATA lua_pushinteger(L, fd); // socket id lua_pushlightuserdata(L, uc->pack.buffer); // buffer lua_pushinteger(L, uc->pack.size); // buffer size skynet_free(uc); return 5; } // more data // buffer 有更多的数据可读, 将数据压入 queue.queue 中 push_data(L, fd, uc->pack.buffer, uc->pack.size, 0); skynet_free(uc); push_more(L, fd, buffer, size); // 继续读取剩下的数据 lua_pushvalue(L, lua_upvalueindex(TYPE_MORE)); // macro TYPE_MORE return 2; } else { if (size == 1) { // 仅读取包头的 1 个数据 struct uncomplete * uc = save_uncomplete(L, fd); uc->read = -1; uc->header = *buffer; return 1; } // 读取包头的数据 int pack_size = read_size(buffer); buffer+=2; size-=2; // 如果 buffer 的数据不够读, 将 buffer 数据全部读取 if (size < pack_size) { struct uncomplete * uc = save_uncomplete(L, fd); uc->read = size; uc->pack.size = pack_size; uc->pack.buffer = skynet_malloc(pack_size); memcpy(uc->pack.buffer, buffer, size); return 1; } 
// 如果 buffer 的数据恰好是 1 个包的数据大小, 将 buffer 数据全部读取 if (size == pack_size) { // just one package lua_pushvalue(L, lua_upvalueindex(TYPE_DATA)); // macro TYPE_DATA lua_pushinteger(L, fd); // socket id void * result = skynet_malloc(pack_size); memcpy(result, buffer, size); lua_pushlightuserdata(L, result); // buffer lua_pushinteger(L, size); // buffer size return 5; } // more data // 如果 buffer 的数据大于 1 个包的数据大小, 那么继续读取 buffer 里面的数据 push_data(L, fd, buffer, pack_size, 1); buffer += pack_size; size -= pack_size; push_more(L, fd, buffer, size); lua_pushvalue(L, lua_upvalueindex(TYPE_MORE)); // macro TYPE_MORE return 2; } }