/*
 * Allocate a player id (thread slot) from the global pid pool.
 *
 * Scans the pool bitmap round-robin, starting just after the last pid
 * handed out, so slots are recycled evenly instead of always reusing 0.
 *
 * Returns the allocated pid in [0, MAX_PLAYER_THREADS), or -1 when every
 * slot is in use.  Thread-safe: the pool is guarded by priv_pid_mutex.
 */
int player_request_pid(void)
{
    int i, j;
    int pid = -1;
    static int last = -1;   /* last pid returned; start point of the round-robin scan */

    pthread_mutex_lock(&priv_pid_mutex);
    log_debug1("[player_request_pid:%d]last=%d\n", __LINE__, last);

    i = last + 1;
    if (i >= (MAX_PLAYER_THREADS)) {
        i = 0;
    }

    for (j = 0; j < MAX_PLAYER_THREADS; j++) {
        /* use an unsigned literal for the shift: with a signed 1,
         * i == 31 (MAX_PLAYER_THREADS == 32) would be undefined behavior */
        if (!(priv_pid_pool & (1u << i))) {
            priv_pid_pool |= (1u << i);
            priv_pid_data[i] = NULL;
            priv_pid_used[i] = 0;
            pid = i;
            last = i;
            log_debug1("[player_request_pid:%d]last=%d pid=%d\n", __LINE__, last, pid);
            break;
        }
        i = i + 1;
        if (i >= (MAX_PLAYER_THREADS)) {
            i = 0;
        }
    }

    pthread_mutex_unlock(&priv_pid_mutex);
    return pid;
}
void ClusterImpl::read(std::istream& in) { log_debug1("read"); // read first offset, which specifies, how many offsets we need to read size_type offset; in.read(reinterpret_cast<char*>(&offset), sizeof(offset)); if (in.fail()) return; offset = fromLittleEndian(&offset); size_type n = offset / 4; size_type a = offset; log_debug1("first offset is " << offset << " n=" << n << " a=" << a); // read offsets offsets.clear(); data.clear(); offsets.reserve(n); offsets.push_back(0); while (--n) { in.read(reinterpret_cast<char*>(&offset), sizeof(offset)); if (in.fail()) { log_debug1("fail at " << n); return; } offset = fromLittleEndian(&offset); log_debug1("offset=" << offset << '(' << offset-a << ')'); offsets.push_back(offset - a); } // last offset points past the end of the cluster, so we know now, how may bytes to read if (offsets.size() > 1) { n = offsets.back() - offsets.front(); if (n > 0) { data.resize(n); log_debug1("read " << n << " bytes of data"); in.read(&(data[0]), n); } else log_warn("read empty cluster"); } }
/*
 * Queue a command into the player's message pool and wake the player thread.
 *
 * The pool is a fixed-size circular buffer of MESSAGE_MAX slots.  When it
 * is full, in_index has wrapped around onto out_index, so the slot at
 * in_index holds the OLDEST pending command: that command is freed and
 * overwritten by the new one, and out_index is advanced to drop it
 * logically as well (message_num stays at MESSAGE_MAX).
 *
 * Always returns 0: the command is always accepted, possibly at the cost
 * of the oldest queued command.
 */
int send_message(play_para_t *para, player_cmd_t *cmd)
{
    int ret = -1;
    /* fixed: source text had been corrupted to a pilcrow ("&para;" HTML
     * entity); restored to take the address of para's message_pool */
    message_pool_t *pool = &para->message_pool;

    //log_print("[send_message:%d]num=%d in_idx=%d out_idx=%d\n",__LINE__,pool->message_num,pool->message_in_index,pool->message_out_index);
    pthread_mutex_lock(&pool->msg_mutex);
    if (pool->message_num < MESSAGE_MAX) {
        pool->message_list[pool->message_in_index] = cmd;
        pool->message_in_index = (pool->message_in_index + 1) % MESSAGE_MAX;
        pool->message_num++;
        wakeup_player_thread(para);
        ret = 0;
    } else {
        /*message_num is full*/
        /* full buffer invariant: in_index == out_index, so the slot at
         * in_index is the oldest command -- free and overwrite it */
        player_cmd_t *oldestcmd;
        oldestcmd = pool->message_list[pool->message_in_index];
        FREE(oldestcmd);
        pool->message_out_index = (pool->message_out_index + 1) % MESSAGE_MAX;
        /*del the oldest command*/
        pool->message_list[pool->message_in_index] = cmd;
        pool->message_in_index = (pool->message_in_index + 1) % MESSAGE_MAX;
        wakeup_player_thread(para);
        ret = 0;
    }
    log_debug1("[send_message:%d]num=%d in_idx=%d out_idx=%d cmd=%x mode=%d\n", __LINE__,
               pool->message_num, pool->message_in_index, pool->message_out_index,
               cmd->ctrl_cmd, cmd->set_mode);
    pthread_mutex_unlock(&pool->msg_mutex);
    return ret;
}
/* --------------------------------------------------------------------------*/
/*
 * Start playback: validate the control block, decide the "black out the
 * video layer" policy, allocate a player id, build the per-player state
 * and spawn the player thread.
 *
 * ctrl_p - caller-owned playback control block (kept referenced by the
 *          player state; must outlive playback).
 * priv   - opaque caller cookie stored in the player state.
 *
 * Returns the player id (>= 0) on success, or a negative PLAYER_* error.
 */
int player_start(play_control_t *ctrl_p, unsigned long priv)
{
    int ret;
    int pid = -1;
    play_para_t *p_para;
    //char stb_source[32];

    update_loglevel_setting();
    print_version_info();
    log_print("[player_start:enter]p=%p black=%d\n", ctrl_p, get_black_policy());

    if (ctrl_p == NULL) {
        return PLAYER_EMPTY_P;
    }

    /*keep last frame displaying --default*/
    set_black_policy(0);
    /* if not set keep last frame, or change file playback, clear display last frame */
    if (!ctrl_p->displast_frame) {
        set_black_policy(1);
    } else if (!check_file_same(ctrl_p->file_name)) {
        set_black_policy(1);
    }

    pid = player_request_pid();
    if (pid < 0) {
        return PLAYER_NOT_VALID_PID;
    }

    p_para = MALLOC(sizeof(play_para_t));
    if (p_para == NULL) {
        /* fix: release the pid slot we just claimed, otherwise it is
         * leaked until process exit */
        player_release_pid(pid);
        return PLAYER_NOMEM;
    }
    MEMSET(p_para, 0, sizeof(play_para_t));

    /* init time_point to a invalid value (-1 == "not set yet") */
    p_para->playctrl_info.time_point = -1;

    player_init_pid_data(pid, p_para);
    message_pool_init(p_para);

    p_para->start_param = ctrl_p;
    p_para->player_id = pid;
    p_para->extern_priv = priv;
    log_debug1("[player_start]player_para=%p,start_param=%p pid=%d\n", p_para, p_para->start_param, pid);

    ret = player_thread_create(p_para);
    if (ret != PLAYER_SUCCESS) {
        FREE(p_para);
        player_release_pid(pid);
        return PLAYER_CAN_NOT_CREAT_THREADS;
    }

    log_print("[player_start:exit]pid = %d \n", pid);
    return pid;
}
int ngx_http_clojure_eval(int cid, void *r) { JNIEnv *env = jvm_env; int rc; log_debug1(ngx_http_clojure_global_cycle->log, "ngx clojure eval request: %ul", (uintptr_t)r); log_debug2(ngx_http_clojure_global_cycle->log, "ngx clojure eval request to jlong: %" PRIu64 ", size: %d", (jlong)r, 8); rc = (*env)->CallStaticIntMethod(env, nc_rt_class, nc_rt_eval_mid, (jint)cid, (uintptr_t)r); exception_handle(1, env, return 500); return rc; }
/* Dispatch the current packet up from the Ethernet layer: validate that it
 * is an IPv4 packet addressed to this interface, then hand it to the TCP
 * or ICMP handler based on the IP protocol field. */
void process_ip(void)
{
    /* the IP header starts right after the fixed-size Ethernet header */
    cur_pkt->ip_hdr = (ip_hdr_t *) (cur_pkt->buf + sizeof(eth_hdr_t));
    /* NOTE(review): src_ip is recorded before the version/destination
     * checks below, so cur_sock keeps the source of a rejected packet --
     * presumably harmless, but confirm nothing reads this stale value. */
    cur_sock->src_ip = cur_pkt->ip_hdr->src_addr;
    if (unlikely(cur_pkt->ip_hdr->version != 4)) {
        /* not IPv4 -- drop silently */
        log_debug1("this is not the packet you are looking for");
        return;
    }
    /* drop packets whose destination is not one of this card's addresses */
    if (unlikely(! is_this_card_ip((struct in_addr *) &cur_pkt->ip_hdr->dst_addr))) {
        log_debug1("this is not the packet you are looking for");
        return;
    }
    switch(cur_pkt->ip_hdr->proto) {
        case IP_PROTO_TCP:
            process_tcp();
            break;
        case IP_PROTO_ICMP:
            process_icmp();
            break;
        /* any other protocol is silently ignored */
    }
}
/* This return the number of char read */
/// Read the cluster's offset table from `in`.
///
/// The first 4-byte little-endian word is the offset of the data area;
/// since each offset word is 4 bytes, it also gives the number of offset
/// words in the header (offset / 4).  Stored offsets are made relative to
/// the data area, so the first stored offset is 0.
///
/// Returns the absolute offset of the data area (== bytes consumed).
/// Throws ZimFileFormatError if the stream fails while reading.
///
/// NOTE(review): this assumes sizeof(size_type) == 4 (the `offset / 4`
/// divisor) -- confirm against the typedef.  A first offset < 4 would
/// make `n` zero and `--n` wrap around; presumably the format guarantees
/// at least one offset word.
offset_type ClusterImpl::read_header(std::istream& in)
{
  log_debug1("read_header");
  // read first offset, which specifies, how many offsets we need to read
  size_type offset;
  in.read(reinterpret_cast<char*>(&offset), sizeof(offset));
  if (in.fail())
  {
    std::cerr << "fail at read offset" << std::endl;
    throw ZimFileFormatError("fail at read first offset");
  }
  offset = fromLittleEndian(&offset);
  size_type n = offset / 4;
  size_type a = offset;     // data-area offset; subtracted to normalize entries
  log_debug1("first offset is " << offset << " n=" << n << " a=" << a);
  // read offsets
  offsets.clear();
  offsets.reserve(n);
  offsets.push_back(0);
  while (--n)               // n-1 further words follow the first one
  {
    in.read(reinterpret_cast<char*>(&offset), sizeof(offset));
    if (in.fail())
    {
      log_debug("fail at " << n);
      throw ZimFileFormatError("fail at read offset");
    }
    offset = fromLittleEndian(&offset);
    log_debug1("offset=" << offset << '(' << offset-a << ')');
    offsets.push_back(offset - a);
  }
  return a;
}
void ClusterImpl::read_content(std::istream& in) { log_debug1("read_content"); _data.clear(); // last offset points past the end of the cluster, so we know now, how may bytes to read if (offsets.size() > 1) { size_type n = offsets.back() - offsets.front(); if (n > 0) { _data.resize(n); log_debug("read " << n << " bytes of data"); in.read(&(_data[0]), n); } else log_warn("read empty cluster"); } }
/// Append one blob to the cluster: copy its bytes onto the end of the
/// payload buffer and record the end offset (which is also where the
/// next blob will start).
void ClusterImpl::addBlob(const Blob& blob)
{
  log_debug1("addBlob(ptr, " << blob.size() << ')');
  data.insert(data.end(), blob.data(), blob.end());
  offsets.push_back(data.size());
}
/*
 * Register a zone descriptor with the database service and reconcile it
 * with any previously registered descriptor for the same origin.
 *
 * On successful registration the zone is marked REGISTERED and, if the
 * service is running, queued for loading (masters also notify slaves).
 * On registration failure the error code selects the handling:
 *   - MISSING_DOMAIN / MISSING_MASTER: log and release the descriptor;
 *   - CONFIG_CLONE (identical settings): refresh flags on the existing
 *     descriptor and queue a (re)load;
 *   - CONFIG_DUP (different settings): merge the new settings into the
 *     existing descriptor field by field, then queue a (re)load.
 *
 * NOTE(review): this body restores text corrupted by HTML-entity
 * mangling in the source: every "&current" had become "(curren-sign)t",
 * one comment and one log string literal were split mid-token.
 */
void database_load_zone_desc(zone_desc_s *zone_desc)
{
    yassert(zone_desc != NULL);

    log_debug1("database_load_zone_desc(%{dnsname}@%p=%i)", zone_desc->origin, zone_desc, zone_desc->rc);

    s32 err = zone_register(&database_zone_desc, zone_desc);

    if(ISOK(err))
    {
        log_info("zone: %{dnsname}: %p: config: registered", zone_desc->origin, zone_desc);

        zone_lock(zone_desc, ZONE_LOCK_LOAD_DESC);
        zone_set_status(zone_desc, ZONE_STATUS_REGISTERED);
        zone_clear_status(zone_desc, ZONE_STATUS_DROP_AFTER_RELOAD);
        zone_unlock(zone_desc, ZONE_LOCK_LOAD_DESC);

        // newly registered zone
        // used to be message->origin

        if(database_service_started())
        {
            database_zone_load(zone_desc->origin); // before this I should set the file name

#if HAS_MASTER_SUPPORT
            if(zone_desc->type == ZT_MASTER)
            {
                if(!host_address_empty(zone_desc->slaves))
                {
                    log_info("zone: %{dnsname}: %p: config: notifying slaves", zone_desc->origin, zone_desc);
                    host_address *slaves = host_address_copy_list(zone_desc->slaves);
                    notify_host_list(zone_desc, slaves, CLASS_CTRL);
                }
            }
            else
#endif
            {
            }
        }
    }
    else
    {
        switch(err)
        {
            case DATABASE_ZONE_MISSING_DOMAIN:
            {
                log_err("zone: ?: %p: config: no domain set (not loaded)", zone_desc);
                if(zone_get_status(zone_desc) & ZONE_STATUS_PROCESSING)
                {
                    log_err("zone: ?: %p: is processed by %s (releasing)", zone_desc, database_service_operation_get_name(zone_desc->last_processor));
                }
                zone_release(zone_desc);
                break;
            }
            case DATABASE_ZONE_MISSING_MASTER:
            {
                log_err("zone: %{dnsname}: %p: config: slave but no master setting (not loaded)", zone_desc->origin, zone_desc);
                if(zone_get_status(zone_desc) & ZONE_STATUS_PROCESSING)
                {
                    log_err("zone: ?: %p: is processed by %s (releasing)", zone_desc, database_service_operation_get_name(zone_desc->last_processor));
                }
                zone_release(zone_desc);
                break;
            }
            case DATABASE_ZONE_CONFIG_CLONE: // Exact copy
            {
                log_debug("zone: %{dnsname}: %p: config: has already been set (same settings)", zone_desc->origin, zone_desc);

                zone_desc_s* current = zone_acquirebydnsname(zone_desc->origin);

                zone_lock(current, ZONE_LOCK_REPLACE_DESC);
                zone_clear_status(current, ZONE_STATUS_DROP_AFTER_RELOAD);
                zone_unlock(current, ZONE_LOCK_REPLACE_DESC);
                zone_release(current);

                // whatever has been decided above, loading the zone file (if it changed) should be queued
                database_zone_load(zone_desc->origin);
                zone_release(zone_desc);
                break;
            }
            case DATABASE_ZONE_CONFIG_DUP: // Not an exact copy
            {
                log_debug("zone: %{dnsname}: %p: config: has already been set (different settings)", zone_desc->origin, zone_desc);

                // basically, most of the changes require a stop, restart of
                // any task linked to the zone
                // so let's make this a rule, whatever changed

                notify_clear(zone_desc->origin);

                zone_desc_s *current = zone_acquirebydnsname(zone_desc->origin);
#if HAS_DYNAMIC_PROVISIONING
                host_address *notify_slaves_then_delete = NULL;
                host_address *notify_slaves = NULL;
#endif
                if(current != zone_desc)
                {
                    zone_lock(current, ZONE_LOCK_REPLACE_DESC);

                    if(zone_get_status(current) & ZONE_STATUS_PROCESSING)
                    {
                        log_err("zone: ?: %p: is processed by %s (overwriting)", zone_desc, database_service_operation_get_name(zone_desc->last_processor));
                    }

                    // what happens if the change is on :
                    // domain: impossible /// @todo 20131203 edf -- compare before replace
                    // file_name : try to load the new file (will happen anyway)

                    if((current->file_name != NULL) && (zone_desc->file_name != NULL))
                    {
                        if(strcmp(current->file_name, zone_desc->file_name) != 0)
                        {
                            zone_set_status(current, ZONE_STATUS_MODIFIED);
                        }
                    }
                    else if(current->file_name != zone_desc->file_name) // at least one of them is NULL
                    {
                        zone_set_status(current, ZONE_STATUS_MODIFIED);
                    }

                    free(current->file_name);
                    current->file_name = zone_desc->file_name;
                    zone_desc->file_name = NULL;    // ownership moved to current

                    // masters :

                    log_debug7("updating %p (%u) with %p (%u): masters", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    if(host_address_list_equals(current->masters, zone_desc->masters))
                    {
                        host_address_delete_list(zone_desc->masters);
                    }
                    else
                    {
                        host_address_delete_list(current->masters);
                        current->masters = zone_desc->masters;
                    }
                    zone_desc->masters = NULL;

                    // notifies :

                    log_debug7("updating %p (%u) with %p (%u): notifies", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    if(host_address_list_equals(current->notifies, zone_desc->notifies))
                    {
                        host_address_delete_list(zone_desc->notifies);
                    }
                    else
                    {
                        host_address_delete_list(current->notifies);
                        current->notifies = zone_desc->notifies;
                    }
                    zone_desc->notifies = NULL;

#if HAS_DYNAMIC_PROVISIONING
                    log_debug7("updating %p (%u) with %p (%u): slaves", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    if(host_address_list_equals(current->slaves, zone_desc->slaves))
                    {
#if HAS_MASTER_SUPPORT
                        // same slave list: on a master, still notify them once
                        if((current->type == ZT_MASTER) || (zone_desc->type == ZT_MASTER))
                        {
                            notify_slaves_then_delete = zone_desc->slaves;
                        }
                        else
#endif
                        {
                            host_address_delete_list(zone_desc->slaves);
                        }
                    }
                    else
                    {
#if HAS_MASTER_SUPPORT
                        if(current->type == ZT_MASTER)
                        {
                            notify_slaves_then_delete = current->slaves;
                        }
                        else
                        {
                            host_address_delete_list(current->slaves);
                        }

                        if(zone_desc->type == ZT_MASTER)
                        {
                            notify_slaves = zone_desc->slaves;
                        }
#else
                        host_address_delete_list(current->slaves);
#endif
                        current->slaves = zone_desc->slaves;
                    }
                    zone_desc->slaves = NULL;
#endif
                    // type : ?

                    log_debug7("updating %p (%u) with %p (%u): type", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    current->type = zone_desc->type;

#if HAS_ACL_SUPPORT
                    // ac : apply the new one, update the zone access

                    log_debug7("updating %p (%u) with %p (%u): ac@%p with ac@%p", current, current->lock_owner, zone_desc, zone_desc->lock_owner, &current->ac, &zone_desc->ac);

#ifdef DEBUG
                    log_debug7("old@%p:", current);
                    log_debug7(" notify@%p",current->ac.allow_notify.ipv4.items);
                    log_debug7(" query@%p",current->ac.allow_query.ipv4.items);
                    log_debug7(" transfer@%p",current->ac.allow_transfer.ipv4.items);
                    log_debug7(" update@%p",current->ac.allow_update.ipv4.items);
                    log_debug7("forwarding@%p",current->ac.allow_update_forwarding.ipv4.items);
                    log_debug7(" control@%p",current->ac.allow_control.ipv4.items);

                    log_debug7("new@%p:", zone_desc);
                    log_debug7(" notify@%p",zone_desc->ac.allow_notify.ipv4.items);
                    log_debug7(" query@%p",zone_desc->ac.allow_query.ipv4.items);
                    log_debug7(" transfer@%p",zone_desc->ac.allow_transfer.ipv4.items);
                    log_debug7(" update@%p",zone_desc->ac.allow_update.ipv4.items);
                    log_debug7("forwarding@%p",zone_desc->ac.allow_update_forwarding.ipv4.items);
                    log_debug7(" control@%p",zone_desc->ac.allow_control.ipv4.items);
#endif
                    acl_unmerge_access_control(&current->ac, &g_config->ac);
                    acl_empties_access_control(&current->ac);
                    memcpy(&current->ac, &zone_desc->ac, sizeof(access_control));
                    ZEROMEMORY(&zone_desc->ac, sizeof(access_control));
#endif
                    // notify : reset, restart

                    log_debug7("updating %p (%u) with %p (%u): notify", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    memcpy(&current->notify, &zone_desc->notify, sizeof(zone_notify_s));

#if HAS_DNSSEC_SUPPORT
#if HAS_RRSIG_MANAGEMENT_SUPPORT
                    // signature : reset, restart

                    log_debug7("updating %p (%u) with %p (%u): signature", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    memcpy(&current->signature, &zone_desc->signature, sizeof(zone_signature_s));
#endif
                    // dnssec_mode : drop everything related to the zone, load the new config

                    log_debug7("updating %p (%u) with %p (%u): dnssec_mode", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    current->dnssec_mode = zone_desc->dnssec_mode;
#endif
                    // refresh : update the "alarms"

                    log_debug7("updating %p (%u) with %p (%u): refresh", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    memcpy(&current->refresh, &zone_desc->refresh, sizeof(zone_refresh_s));

                    // dynamic_provisioning : ?

                    log_debug7("updating %p (%u) with %p (%u): dynamic_provisioning", current, current->lock_owner, zone_desc, zone_desc->lock_owner);

                    memcpy(&current->dynamic_provisioning, &zone_desc->dynamic_provisioning, sizeof(dynamic_provisioning_s));

                    // slaves : update the list

                    zone_unlock(current, ZONE_LOCK_REPLACE_DESC);
                }

                // whatever has been decided above, loading the zone file should be queued

                database_zone_load(zone_desc->origin);

#if HAS_DYNAMIC_PROVISIONING
                // if asking for a load of the zone_data on a master should trigger a notify of its slaves

                log_debug7("handling dynamic provisioning");

                if(!host_address_empty(notify_slaves_then_delete))
                {
                    log_info("zone load desc: %{dnsname}: notifying slaves: %{hostaddrlist}", zone_desc->origin, notify_slaves_then_delete);
                    notify_host_list(current, notify_slaves_then_delete, CLASS_CTRL);
                    notify_slaves_then_delete = NULL;
                }

                if(!host_address_empty(notify_slaves))
                {
                    log_info("zone load desc: %{dnsname}: notifying slaves: %{hostaddrlist}", zone_desc->origin, notify_slaves);
                    host_address *notify_slaves_copy = host_address_copy_list(notify_slaves);
                    notify_host_list(current, notify_slaves_copy, CLASS_CTRL);
                    notify_slaves = NULL;
                }
#endif
#if HAS_MASTER_SUPPORT && HAS_DNSSEC_SUPPORT && HAS_RRSIG_MANAGEMENT_SUPPORT
                if(current->dnssec_policy != zone_desc->dnssec_policy)
                {
                    log_info("zone: %{dnsname}: %p: config: dnssec-policy modified", zone_desc->origin, zone_desc);

                    if(zone_desc->dnssec_policy != NULL)
                    {
                        if(current->dnssec_policy != NULL)
                        {
                            log_warn("zone: %{dnsname}: %p: config: changing dnssec-policy at runtime (%s to %s)", zone_desc->origin, zone_desc, current->dnssec_policy->name, zone_desc->dnssec_policy->name);

                            if(current->dnssec_policy->denial != zone_desc->dnssec_policy->denial)
                            {
                                log_warn("zone: %{dnsname}: %p: config: modifications of the dnssec-policy denial setting may be ignored", zone_desc->origin, zone_desc);
                            }

                            dnssec_policy_release(current->dnssec_policy);
                            current->dnssec_policy = dnssec_policy_acquire_from_name(zone_desc->dnssec_policy->name);
                        }
                        else
                        {
                            log_info("zone: %{dnsname}: %p: config: dnssec-policy %s enabled", zone_desc->origin, zone_desc, zone_desc->dnssec_policy->name);
                            current->dnssec_policy = dnssec_policy_acquire_from_name(zone_desc->dnssec_policy->name);
                        }
                    }
                    else
                    {
                        log_warn("zone: %{dnsname}: %p: config: removing policy at runtime", zone_desc->origin, zone_desc);
                        dnssec_policy_release(current->dnssec_policy);
                        current->dnssec_policy = NULL;
                    }
                }
#endif
                if(current != zone_desc)
                {
                    log_debug7("destroying temporary zone descriptor @%p", zone_desc);
                    zone_release(zone_desc);
                }

                // NOTE(review): when current != zone_desc, zone_desc was just
                // released above and is still touched here -- confirm the
                // reference-count semantics make this safe.
                zone_clear_status(zone_desc, ZONE_STATUS_DROP_AFTER_RELOAD);

                zone_release(current);
                break;
            } // DUP
            default:
            {
                log_err("zone: %{dnsname}: %p: failed to register", zone_desc->origin, zone_desc);
                break;
            }
        } // switch
    }

    log_debug1("database_load_zone_desc(%p) done", zone_desc);
}
/*
 * Create a bidirectional "pipe" as a pair of connected descriptors.
 *
 * On POSIX this is just pipe(2).  On Windows it is emulated with a
 * loopback TCP socket pair: bind a listener on 127.0.0.1 with an
 * ephemeral port, connect to it, and accept the peer.  fds[0] is the
 * accepted (read) side, fds[1] the connecting (write) side.
 *
 * Returns 0 on success, -1 on failure with every temporary handle closed.
 */
static int ngx_http_clojure_pipe(ngx_socket_t fds[2]) {
#if defined(_WIN32) || defined(WIN32)
	SOCKET s;
	struct sockaddr_in serv_addr;
	int len = sizeof( serv_addr );

	fds[0] = fds[1] = INVALID_SOCKET;

	if ( ( s = socket( AF_INET, SOCK_STREAM, 0 ) ) == INVALID_SOCKET ) {
		log_debug1(ngx_http_clojure_global_cycle->log, "ngx clojure:ngx_http_clojure_pipe failed to create socket: %ui", WSAGetLastError());
		return -1;
	}

	memset( &serv_addr, 0, sizeof( serv_addr ) );
	serv_addr.sin_family = AF_INET;
	serv_addr.sin_port = htons(0); /* port 0: let the OS pick a free one */
	serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	if (bind(s, (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR) {
		log_debug1(ngx_http_clojure_global_cycle->log, "ngx clojure:ngx_http_clojure_pipe failed to bind: %ui", WSAGetLastError());
		closesocket(s);
		return -1;
	}

	if (listen(s, 1) == SOCKET_ERROR) {
		log_debug1(ngx_http_clojure_global_cycle->log, "ngx clojure:ngx_http_clojure_pipe failed to listen: %ui", WSAGetLastError());
		closesocket(s);
		return -1;
	}

	/* recover the ephemeral port the OS assigned so we can connect to it */
	if (getsockname(s, (SOCKADDR *) & serv_addr, &len) == SOCKET_ERROR) {
		log_debug1(ngx_http_clojure_global_cycle->log, "ngx clojure:ngx_http_clojure_pipe failed to getsockname: %ui", WSAGetLastError());
		closesocket(s);
		return -1;
	}

	if ((fds[1] = socket(PF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) {
		log_debug1(ngx_http_clojure_global_cycle->log, "ngx clojure:ngx_http_clojure_pipe failed to create socket 2: %ui", WSAGetLastError());
		closesocket(s);
		return -1;
	}

	if (connect(fds[1], (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR) {
		log_debug1(ngx_http_clojure_global_cycle->log, "ngx clojure:ngx_http_clojure_pipe failed to connect socket: %ui", WSAGetLastError());
		/* fix: the connecting socket was leaked on this path */
		closesocket(fds[1]);
		fds[1] = INVALID_SOCKET;
		closesocket(s);
		return -1;
	}

	if ((fds[0] = accept(s, (SOCKADDR *) & serv_addr, &len)) == INVALID_SOCKET) {
		log_debug1(ngx_http_clojure_global_cycle->log, "ngx clojure:ngx_http_clojure_pipe failed to accept socket 2: %ui", WSAGetLastError());
		closesocket(fds[1]);
		fds[1] = INVALID_SOCKET;
		closesocket(s);
		return -1;
	}

	/* the listener is no longer needed once the pair is connected */
	closesocket(s);
	return 0;
#else
	return pipe(fds);
#endif
}