pevents_t * pevents_create(struct pevent_cb_set profiles, struct pevent_cb_set channels) { pevents_t *events = calloc(1, sizeof(*events)); if (!events) { MSG_ERROR(msg_module, "Unable to allocate memory (%s:%d)", __FILE__, __LINE__); return NULL; } /* Initialize an internal structures. Because we don't have information * about profiles/channels we should prepare empty structures. * However, pre-allocation for "0" items is not suitable, so we use "1". */ if (group_init(&events->profiles, 1)) { free(events); return NULL; } if (group_init(&events->channels, 1)) { group_deinit(&events->profiles); free(events); return NULL; } events->channels.cbs = channels; events->profiles.cbs = profiles; return events; }
/**
 * \brief Create a new update structure and parse a new profile tree
 *
 * The returned structure is prepared for mapping old channels/profiles to
 * new channels/profiles (stored inside the update structure).
 * \param[in] mgr       Event manager
 * \param[in] tree_root Root profile of the new profile tree
 * \return Pointer to the new structure on success, NULL on memory
 *   allocation (or tree parsing) failure.
 */
static struct pevents_update *pevents_update_create(pevents_t *mgr, void *tree_root)
{
    struct pevents_update *update = calloc(1, sizeof(*update));
    if (!update) {
        MSG_ERROR(msg_module, "Unable to allocate memory (%s:%d)", __FILE__, __LINE__);
        return NULL;
    }

    /*
     * Size hints (expected number of channels/profiles) to avoid expensive
     * reallocations: current size plus an overlap, clamped to a minimum.
     */
    size_t profile_hint = mgr->profiles.all_size + PEVENTS_HINT_OVERLAP;
    size_t channel_hint = mgr->channels.all_size + PEVENTS_HINT_OVERLAP;
    if (profile_hint < PEVENTS_HINT_PROFILE)
        profile_hint = PEVENTS_HINT_PROFILE;
    if (channel_hint < PEVENTS_HINT_CHANNELS)
        channel_hint = PEVENTS_HINT_CHANNELS;

    /* Initialize the profile group first, then the channel group */
    if (group_init(&update->profiles, profile_hint) != 0) {
        free(update);
        return NULL;
    }
    if (group_init(&update->channels, channel_hint) != 0) {
        group_deinit(&update->profiles);
        free(update);
        return NULL;
    }

    /* Fill both groups from the new profile tree */
    if (pevents_update_parse_tree(update, tree_root) != 0) {
        pevents_update_delete(update);
        return NULL;
    }

    /* Propagate the manager's user-global pointer into every new context */
    for (size_t i = 0; i < update->channels.all_size; ++i) {
        struct pevents_item *item = group_item_at(&update->channels, i);
        item->ctx.user.global = mgr->user_global;
    }
    for (size_t i = 0; i < update->profiles.all_size; ++i) {
        struct pevents_item *item = group_item_at(&update->profiles, i);
        item->ctx.user.global = mgr->user_global;
    }

    return update;
}
enum bsmp_err bsmp_server_init (struct bsmp_server *server) { if(!server) return BSMP_ERR_PARAM_INVALID; memset(server, 0, sizeof(*server)); group_init(&server->groups.list[GROUP_ALL_ID], GROUP_ALL_ID); group_init(&server->groups.list[GROUP_READ_ID], GROUP_READ_ID); group_init(&server->groups.list[GROUP_WRITE_ID], GROUP_WRITE_ID); server->groups.count = GROUP_STANDARD_COUNT; return BSMP_SUCCESS; }
/*
 * One-shot initialization of all subsystems.
 *
 * NOTE(review): the call order below is significant — the inline comments
 * record the inter-subsystem dependencies, so do not reorder casually.
 */
void
init(void)
{
	app_init();
	doi_init();
	exchange_init();
	group_init();
	ipsec_init();
	isakmp_doi_init();
	libcrypto_init();
	timer_init();

	/* The following group depends on timer_init having run. */
	conf_init();
	connection_init();

	/* This depends on conf_init, thus check as soon as possible. */
	log_reinit();

	/* policy_init depends on conf_init having run. */
	policy_init();

	/* Depends on conf_init and policy_init having run. */
	cert_init();
	crl_init();

	sa_init();
	transport_init();
	virtual_init();
	udp_init();
	nat_t_init();
	udp_encap_init();
	vendor_init();
}
/*
 * Per-connection startup: chdir to the spool root, open the history
 * database, initialize group state, export the peer/local IP addresses as
 * environment variables, install signal handlers, and enter the service
 * loop.
 *
 * Exits the process (status 2) on chdir or group_init failure.
 */
static void init (void)
{
	/* Table pairing a socket-address query function with the environment
	 * variable its result should populate. buf is large enough to hold
	 * "NAME=" plus a dotted-quad address appended by strcat below. */
	static struct { int (*f) (); char buf[40]; } con[] = {
		{ getpeername, "TCPREMOTEIP" },
		{ getsockname, "TCPLOCALIP" }
	};
	struct sigaction sa;
	struct sockaddr_in sin;
	int i, len;

	if (-1 == chdir(snroot)) FAIL(2, "chdir(%s):%m", snroot);
	(void) dh_open(NULL, FALSE);
	if (-1 == group_init()) { dh_close(); _exit(2); }

	/* For each table entry not already present in the environment, query
	 * the address on fd 0 and putenv "NAME=a.b.c.d". The comma expression
	 * in the loop condition resets len before every iteration, as
	 * required by getpeername/getsockname's value-result parameter. */
	for (i = 0; len = sizeof (sin), i < 2; i++)
		if (!getenv(con[i].buf))
			if (0 == (*(con[i].f)) (0, (struct sockaddr *) &sin, &len))
				putenv(strcat(strcat(con[i].buf, "="), inet_ntoa(sin.sin_addr)));

	/* Point client_ip just past "TCPREMOTEIP" in con[0].buf: at the '='
	 * (skipped below) when the putenv above ran, or at the NUL if the
	 * variable was inherited from the environment — in the latter case
	 * client_ip is the empty string. NOTE(review): presumably callers
	 * fall back to getenv for the inherited case — verify. */
	client_ip = con->buf + sizeof ("TCPREMOTEIP") - 1;
	if ('=' == *client_ip) client_ip++;

	me = myname();

	/* SIGHUP runs handler (restartable syscalls); SIGPIPE is ignored so
	 * peer disconnects surface as write errors instead of killing us. */
	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sigaction(SIGHUP, &sa, NULL);
	sa.sa_handler = SIG_IGN;
	sigaction(SIGPIPE, &sa, NULL);

	docheckservice();
}
/*
 * Initialize one X screen: allocate the per-screen context, register EWMH
 * support, load configuration and geometry, adopt pre-existing viewable
 * clients, and hook the root window's event mask.
 *
 * which: the X screen number this context manages.
 */
void screen_init(int which)
{
	struct screen_ctx *sc;
	Window *wins, w0, w1;
	XWindowAttributes winattr;
	XSetWindowAttributes rootattr;
	unsigned int nwins, i;

	sc = xcalloc(1, sizeof(*sc));

	sc->which = which;
	sc->visual = DefaultVisual(X_Dpy, sc->which);
	sc->colormap = DefaultColormap(X_Dpy, sc->which);
	sc->rootwin = RootWindow(X_Dpy, sc->which);

	/* Advertise EWMH support before any clients are managed. */
	xu_ewmh_net_supported(sc);
	xu_ewmh_net_supported_wm_check(sc);

	conf_screen(sc);
	screen_update_geometry(sc);

	TAILQ_INIT(&sc->mruq);

	group_init(sc);

	/* Become the window manager for this screen: select substructure
	 * redirect/notify plus the enter/leave/property events we track. */
	rootattr.cursor = Conf.cursor[CF_NORMAL];
	rootattr.event_mask = SubstructureRedirectMask|SubstructureNotifyMask|
	    PropertyChangeMask|EnterWindowMask|LeaveWindowMask|
	    ColormapChangeMask|BUTTONMASK;

	XChangeWindowAttributes(X_Dpy, sc->rootwin,
	    CWEventMask|CWCursor, &rootattr);

	/* Deal with existing clients: adopt every viewable, non-override
	 * window already present on this screen. NOTE(review): after the
	 * IsViewable filter, "map_state != IsUnmapped" is always true —
	 * confirm whether client_init's third argument is meant to vary. */
	XQueryTree(X_Dpy, sc->rootwin, &w0, &w1, &wins, &nwins);
	for (i = 0; i < nwins; i++) {
		XGetWindowAttributes(X_Dpy, wins[i], &winattr);
		if (winattr.override_redirect ||
		    winattr.map_state != IsViewable)
			continue;
		(void)client_init(wins[i], sc, winattr.map_state != IsUnmapped);
	}
	XFree(wins);

	screen_updatestackingorder(sc);

	if (HasRandr)
		XRRSelectInput(X_Dpy, sc->rootwin, RRScreenChangeNotifyMask);

	TAILQ_INSERT_TAIL(&Screenq, sc, entry);

	XSync(X_Dpy, False);
}
/*
 * Start a thread group with the two command-line arguments and wait for all
 * CNT worker threads to finish.
 *
 * Fix: the original dereferenced argv[1]/argv[2] without checking argc,
 * which is undefined behavior when fewer than two arguments are supplied.
 */
int main(int argc, char *argv[])
{
    /* Validate the argument count before touching argv[1]/argv[2]. */
    if (argc < 3) {
        fprintf(stderr, "usage: %s <arg1> <arg2>\n", argv[0]);
        return 1;
    }

    GROUP grp;
    group_init(&grp);
    group_start(&grp, argv[1], argv[2]);

    /* Join every worker created by group_start. */
    int index;
    for (index = 0; index < CNT; index++)
        pthread_join(grp.arr[index].id, NULL);

    return 0;
}
/*
 * Boot-time PSI setup: either arm the accounting machinery for the
 * system-wide group, or flip the disabled static branch when PSI was turned
 * off on the command line.
 */
void __init psi_init(void)
{
	if (psi_enable) {
		psi_period = jiffies_to_nsecs(PSI_FREQ);
		group_init(&psi_system);
	} else {
		static_branch_enable(&psi_disabled);
	}
}
int psi_cgroup_alloc(struct cgroup *cgroup) { if (static_branch_likely(&psi_disabled)) return 0; cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu); if (!cgroup->psi.pcpu) return -ENOMEM; group_init(&cgroup->psi); return 0; }
/*
 * Initialize a group workspace on top of the generic group initializer.
 *
 * Returns TRUE on success, FALSE if the underlying group_init fails.
 */
bool groupws_init(WGroupWS *ws, WWindow *parent, const WFitParams *fp)
{
    WRegion *reg = (WRegion*)ws;

    if (!group_init(&(ws->grp), parent, fp, "Notion GroupWS"))
        return FALSE;

    ws->initial_outputs = extl_create_table();

    /* Workspaces grab input on their parent and get the workspace bindmap. */
    reg->flags |= REGION_GRAB_ON_PARENT;
    region_add_bindmap(reg, ioncore_groupws_bindmap);

    return TRUE;
}
/*
 * Diffie-Hellman self-test: for every registered group ID, create two
 * independent instances, run a full exchange in both directions, and verify
 * both sides derive the same shared secret.
 *
 * Fixes: the original leaked the first group instance when the second
 * group_get() failed, and leaked both instances on the FAILED early return.
 */
int main(void)
{
	int len, id;
	char buf[DH_MAXSZ], buf2[DH_MAXSZ];
	char sec[DH_MAXSZ], sec2[DH_MAXSZ];
	struct group *group, *group2;
	const char *name[] = { "MODP", "EC2N", "ECP" };

	group_init();

	for (id = 0; id < 0xff; id++) {
		if ((group = group_get(id)) == NULL)
			continue;
		if ((group2 = group_get(id)) == NULL) {
			/* Fix: free the first instance before skipping. */
			group_free(group);
			continue;
		}

		printf("Testing group %d (%s%d): ", id,
		    name[group->spec->type], group->spec->bits);

		len = dh_getlen(group);

		dh_create_exchange(group, buf);
		dh_create_exchange(group2, buf2);

		dh_create_shared(group, sec, buf2);
		dh_create_shared(group2, sec2, buf);

		if (memcmp(sec, sec2, len)) {
			printf("FAILED\n");
			/* Fix: release both instances before bailing out. */
			group_free(group);
			group_free(group2);
			return (1);
		} else
			printf("OKAY\n");

		group_free(group);
		group_free(group2);
	}
	return (0);
}
/*
 * Decode one received SLLP message, execute the requested command, and fill
 * in the answer message.
 *
 * Conventions visible in this function:
 *  - sllp_list_value_at() returns 0 on success, non-zero on bad index.
 *  - is_payload_size_equal_to() returns false (and sets the error answer)
 *    when the payload size check fails.
 *
 * Fixes relative to the original:
 *  - CMD_WRITE_GROUP read the group ID from "(unsigned int)recv_msg->payload"
 *    (the payload POINTER cast to an integer) instead of payload[0].
 *  - CMD_WRITE_GROUP never set CMD_OK on its success path (CMD_WRITE_VAR
 *    does); the stale answer code was sent back.
 *  - CMD_CREATE_GROUP's per-variable lookup and CMD_CURVE_RECALC_CSUM's
 *    curve lookup had inverted "!sllp_list_value_at(...)" error checks,
 *    reporting CMD_ERR_INVALID_ID exactly when the lookup succeeded.
 *
 * Returns SLLP_ERR_PARAM_INVALID when either message pointer is NULL,
 * SLLP_SUCCESS otherwise (per-command errors are reported in send_msg).
 */
static enum sllp_err message_process(sllp_instance_t *sllp,
                                     struct message *recv_msg,
                                     struct message *send_msg)
{
    if (!recv_msg || !send_msg)
        return SLLP_ERR_PARAM_INVALID;

    sllp->modified_list[0] = NULL;

    switch (recv_msg->command_code)
    {
    case CMD_QUERY_VARS_LIST: // Answer with CMD_VARS_LIST
    {
        // Check payload size
        if (!is_payload_size_equal_to(recv_msg, send_msg, 0, false))
            break;

        // Set answer's command_code and payload_size
        message_set_answer(send_msg, CMD_VARS_LIST);

        // One descriptor byte per variable, in order of their IDs:
        // writable flag plus the variable's size.
        struct sllp_var *var = NULL;
        int i;
        for (i = 0; i < sllp->vars_list.count; ++i) {
            sllp_list_value_at(&sllp->vars_list, i, (void**) &var);
            send_msg->payload[i] = var->writable ? WRITABLE : READ_ONLY;
            send_msg->payload[i] += var->size;
        }
        send_msg->payload_size = sllp->vars_list.count;
        break;
    }

    case CMD_QUERY_GROUPS_LIST: // Answer with CMD_GROUPS_LIST
    {
        // Check payload size
        if (!is_payload_size_equal_to(recv_msg, send_msg, 0, false))
            break;

        message_set_answer(send_msg, CMD_GROUPS_LIST);

        // One descriptor byte per group: writable flag plus member count.
        struct sllp_group *grp;
        int i;
        for (i = 0; i < sllp->groups_list.count; ++i) {
            sllp_list_value_at(&sllp->groups_list, i, (void**) &grp);
            send_msg->payload[i] = grp->writable ? WRITABLE : READ_ONLY;
            send_msg->payload[i] += grp->vars_list.count;
        }
        send_msg->payload_size = sllp->groups_list.count;
        break;
    }

    case CMD_QUERY_GROUP: // Answer with CMD_GROUP
    {
        // Payload: [0] = group ID
        if (!is_payload_size_equal_to(recv_msg, send_msg, 1, false))
            break;

        message_set_answer(send_msg, CMD_GROUP);

        // Get desired group
        struct sllp_group *grp;
        if (sllp_list_value_at(&sllp->groups_list, recv_msg->payload[0],
                               (void**) &grp)) {
            message_set_answer(send_msg, CMD_ERR_INVALID_ID);
            break;
        }

        // Answer with the IDs of the group's variables, in order.
        struct sllp_var *var;
        int i;
        for (i = 0; i < grp->vars_list.count; ++i) {
            sllp_list_value_at(&grp->vars_list, i, (void**) &var);
            send_msg->payload[i] = var->id;
        }
        send_msg->payload_size = grp->vars_list.count;
        break;
    }

    case CMD_QUERY_CURVES_LIST: // Answer with CMD_CURVES_LIST
    {
        if (!is_payload_size_equal_to(recv_msg, send_msg, 0, false))
            break;

        message_set_answer(send_msg, CMD_CURVES_LIST);

        // Fixed-size record per curve: writable flag, block count, checksum.
        struct sllp_curve *curve;
        uint8_t *payloadp = send_msg->payload;
        int i;
        for (i = 0; i < sllp->curves_list.count; ++i) {
            sllp_list_value_at(&sllp->curves_list, i, (void**) &curve);
            (*payloadp++) = curve->writable;
            (*payloadp++) = curve->nblocks;
            memcpy(payloadp, curve->checksum, sizeof(curve->checksum));
            payloadp += sizeof(curve->checksum);
        }
        send_msg->payload_size = sllp->curves_list.count * CURVE_INFO_SIZE;
        break;
    }

    case CMD_READ_VAR: // Answer with CMD_VAR_READING
    {
        // Payload: [0] = variable ID
        if (!is_payload_size_equal_to(recv_msg, send_msg, 1, false))
            break;

        message_set_answer(send_msg, CMD_VAR_READING);

        // Get desired variable
        struct sllp_var *var;
        if (sllp_list_value_at(&sllp->vars_list, recv_msg->payload[0],
                               (void**) &var)) {
            message_set_answer(send_msg, CMD_ERR_INVALID_ID);
            break;
        }

        // Let the application refresh the variable before it is read.
        if (sllp->hook) {
            sllp->modified_list[0] = var;
            sllp->modified_list[1] = NULL;
            sllp->hook(SLLP_OP_READ, sllp->modified_list);
        }

        send_msg->payload_size = var->size;
        memcpy(send_msg->payload, var->data, var->size);
        break;
    }

    case CMD_READ_GROUP: // Answer with CMD_GROUP_READING
    {
        // Payload: [0] = group ID
        if (!is_payload_size_equal_to(recv_msg, send_msg, 1, false))
            break;

        message_set_answer(send_msg, CMD_GROUP_READING);

        // Get desired group
        struct sllp_group *grp;
        if (sllp_list_value_at(&sllp->groups_list, recv_msg->payload[0],
                               (void**) &grp)) {
            message_set_answer(send_msg, CMD_ERR_INVALID_ID);
            break;
        }

        // Let the application refresh every member before reading.
        if (sllp->hook) {
            sllp_list_copy_to_vector(&grp->vars_list,
                                     (void**) sllp->modified_list);
            sllp->hook(SLLP_OP_READ, sllp->modified_list);
        }

        // Concatenate the members' data into the answer payload.
        struct sllp_var *var;
        uint8_t *payloadp = send_msg->payload;
        int i = 0;
        while (!sllp_list_value_at(&grp->vars_list, i, (void**) &var)) {
            memcpy(payloadp, var->data, var->size);
            payloadp += var->size;
            ++i;
        }
        send_msg->payload_size = grp->data_size;
        break;
    }

    case CMD_WRITE_VAR: // Answer with CMD_OK or error
    {
        // At least two bytes: one for the ID, at least one for the value.
        if (!is_payload_size_equal_to(recv_msg, send_msg, 2, true))
            break;

        message_set_answer(send_msg, CMD_OK);

        // Check ID
        struct sllp_var *var;
        if (sllp_list_value_at(&sllp->vars_list, recv_msg->payload[0],
                               (void**) &var)) {
            message_set_answer(send_msg, CMD_ERR_INVALID_ID);
            break;
        }

        // Now that the size is known, check the payload exactly.
        if (!is_payload_size_equal_to(recv_msg, send_msg, var->size + 1, false))
            break;

        // Check write permission
        if (!var->writable) {
            message_set_answer(send_msg, CMD_ERR_READ_ONLY);
            break;
        }

        // Everything is OK, perform the write operation
        memcpy(var->data, recv_msg->payload + 1, var->size);

        // Notify the application of the write.
        if (sllp->hook) {
            sllp->modified_list[0] = var;
            sllp->modified_list[1] = NULL;
            sllp->hook(SLLP_OP_WRITE, sllp->modified_list);
        }
        break;
    }

    case CMD_WRITE_GROUP: // Answer with CMD_OK or error
    {
        // At least two bytes: one for the ID, at least one for the values.
        if (!is_payload_size_equal_to(recv_msg, send_msg, 2, true))
            break;

        // FIX: set the success answer up-front, as CMD_WRITE_VAR does; the
        // original never set CMD_OK on this command's success path.
        message_set_answer(send_msg, CMD_OK);

        // FIX: the group ID is the first payload byte; the original passed
        // the payload pointer cast to unsigned int.
        struct sllp_group *grp;
        if (sllp_list_value_at(&sllp->groups_list, recv_msg->payload[0],
                               (void**) &grp)) {
            message_set_answer(send_msg, CMD_ERR_INVALID_ID);
            break;
        }

        // Check payload size against the group's total data size.
        if (!is_payload_size_equal_to(recv_msg, send_msg, grp->data_size + 1,
                                      false))
            break;

        // Check write permission
        if (!grp->writable) {
            message_set_answer(send_msg, CMD_ERR_READ_ONLY);
            break;
        }

        // Everything is OK: scatter the payload into the members' data.
        int i = 0;
        struct sllp_var *var;
        uint8_t *payloadp = recv_msg->payload + 1;
        while (!sllp_list_value_at(&grp->vars_list, i, (void**) &var)) {
            memcpy(var->data, payloadp, var->size);
            payloadp += var->size;
            ++i;
        }

        // Notify the application of the writes.
        if (sllp->hook) {
            sllp_list_copy_to_vector(&grp->vars_list,
                                     (void**) sllp->modified_list);
            sllp->hook(SLLP_OP_WRITE, sllp->modified_list);
        }
        break;
    }

    case CMD_CREATE_GROUP: // Answer with CMD_GROUP_CREATED or error
    {
        // At least one variable must be specified for the new group.
        if (!is_payload_size_equal_to(recv_msg, send_msg, 1, true))
            break;

        // Reject requests listing more variables than exist.
        // NOTE(review): this early break leaves the answer code unset when
        // triggered — confirm against is_payload_size_equal_to's contract.
        if (is_payload_size_equal_to(recv_msg, send_msg,
                                     sllp->vars_list.count + 1, true))
            break;

        if (sllp->groups_list.count == MAX_GROUPS) {
            message_set_answer(send_msg, CMD_ERR_INSUFFICIENT_MEMORY);
            break;
        }

        // Allocate group structure
        struct sllp_group *grp = malloc(sizeof(*grp));
        if (!grp) {
            message_set_answer(send_msg, CMD_ERR_INSUFFICIENT_MEMORY);
            break;
        }

        // Initialize group (next free ID, initially writable).
        group_init(grp, sllp->groups_list.count, true);

        // Populate group: each payload byte is a variable ID.
        int i;
        for (i = 0; i < recv_msg->payload_size; ++i) {
            struct sllp_var *var;

            // FIX: value_at returns 0 on success; the original treated a
            // SUCCESSFUL lookup as an invalid ID.
            if (sllp_list_value_at(&sllp->vars_list, recv_msg->payload[i],
                                   (void**) &var)) {
                message_set_answer(send_msg, CMD_ERR_INVALID_ID);
                goto cmd_group_create_err;
            }

            // NOTE(review): the negated check below assumes sllp_list_add
            // returns non-zero on success — verify against its definition.
            if (!sllp_list_add(&grp->vars_list, (void*) var)) {
                message_set_answer(send_msg, CMD_ERR_INSUFFICIENT_MEMORY);
                goto cmd_group_create_err;
            }

            // The group is writable only if ALL its members are.
            grp->writable = grp->writable && var->writable;
            grp->data_size += var->size;
        }

        message_set_answer(send_msg, CMD_GROUP_CREATED);
        send_msg->payload_size = 1;
        send_msg->payload[0] = grp->writable ? WRITABLE : READ_ONLY;
        send_msg->payload[0] += grp->id;
        break;

cmd_group_create_err:
        sllp_list_clear(&grp->vars_list);
        free(grp);
        break;
    }

    case CMD_REMOVE_ALL_GROUPS: // Answer with CMD_OK
    {
        if (!is_payload_size_equal_to(recv_msg, send_msg, 0, false))
            break;

        message_set_answer(send_msg, CMD_OK);

        // Keep only the standard groups; drop every user-created one.
        sllp_list_trim(&sllp->groups_list, GROUP_STANDARD_COUNT);
        break;
    }

    case CMD_CURVE_TRANSMIT: // Answer with CMD_CURVE_BLOCK or error
    {
        // Payload: [0] = curve ID, [1] = block offset
        if (!is_payload_size_equal_to(recv_msg, send_msg, 2, false))
            break;

        struct sllp_curve *curve;
        if (sllp_list_value_at(&sllp->curves_list, recv_msg->payload[0],
                               (void**) &curve)) {
            message_set_answer(send_msg, CMD_ERR_INVALID_ID);
            break;
        }

        uint8_t block_offset = recv_msg->payload[1];
        if (block_offset > curve->nblocks) {
            message_set_answer(send_msg, CMD_ERR_INVALID_VALUE);
            break;
        }

        message_set_answer(send_msg, CMD_CURVE_BLOCK);
        send_msg->payload[0] = curve->id;
        send_msg->payload[1] = block_offset;
        curve->read_block(curve, block_offset, send_msg->payload + 2);
        send_msg->payload_size = 2 + CURVE_BLOCK_DATA_SIZE;
        break;
    }

    case CMD_CURVE_BLOCK: // Answer with CMD_OK or error
    {
        // Payload: [0] = curve ID, [1] = block offset, then block data.
        if (!is_payload_size_equal_to(recv_msg, send_msg,
                                      2 + CURVE_BLOCK_DATA_SIZE, false))
            break;

        uint8_t id = recv_msg->payload[0];
        struct sllp_curve *curve;
        if (sllp_list_value_at(&sllp->curves_list, id, (void**) &curve)) {
            message_set_answer(send_msg, CMD_ERR_INVALID_ID);
            break;
        }

        uint8_t block_offset = recv_msg->payload[1];
        if (block_offset > curve->nblocks) {
            message_set_answer(send_msg, CMD_ERR_INVALID_VALUE);
            break;
        }

        curve->write_block(curve, block_offset, recv_msg->payload + 2);
        message_set_answer(send_msg, CMD_OK);
        break;
    }

    case CMD_CURVE_RECALC_CSUM: // Answer with CMD_OK or error
    {
        // Payload: [0] = curve ID
        if (!is_payload_size_equal_to(recv_msg, send_msg, 1, false))
            break;

        uint8_t id = recv_msg->payload[0];
        struct sllp_curve *curve;

        // FIX: value_at returns 0 on success; the original reported
        // CMD_ERR_INVALID_ID exactly when the lookup succeeded.
        if (sllp_list_value_at(&sllp->curves_list, id, (void**) &curve)) {
            message_set_answer(send_msg, CMD_ERR_INVALID_ID);
            break;
        }

        // nblocks holds the highest valid block index, so the curve has
        // nblocks + 1 blocks in total (consistent with the ">" range checks
        // in the curve commands above).
        unsigned int nblocks = curve->nblocks + 1;
        uint8_t block[CURVE_BLOCK_DATA_SIZE];
        MD5_CTX md5ctx;

        MD5Init(&md5ctx);
        unsigned int i;
        for (i = 0; i < nblocks; ++i) {
            curve->read_block(curve, (uint8_t)i, block);
            MD5Update(&md5ctx, block, CURVE_BLOCK_DATA_SIZE);
        }
        MD5Final(curve->checksum, &md5ctx);

        message_set_answer(send_msg, CMD_OK);
        break;
    }

    default:
        message_set_answer(send_msg, CMD_ERR_OP_NOT_SUPPORTED);
        break;
    }

    return SLLP_SUCCESS;
}
/*
 * Move back and forward change groups for a consistent and pretty diff output.
 * This also helps in finding joinable change groups and reducing the diff
 * size.
 *
 * xdf is the file whose change groups are compacted; xdfo is the other file
 * of the pair, walked in lockstep so both sides' group cursors stay
 * synchronized (any desync is a bug and aborts via xdl_bug).
 */
int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags)
{
	struct xdlgroup g, go;
	long earliest_end, end_matching_other;
	long groupsize;

	group_init(xdf, &g);
	group_init(xdfo, &go);

	while (1) {
		/* If the group is empty in the to-be-compacted file, skip it: */
		if (g.end == g.start)
			goto next;

		/*
		 * Now shift the change up and then down as far as possible in
		 * each direction. If it bumps into any other changes, merge
		 * them; the loop repeats until the group's size stops growing,
		 * i.e. no further merges are possible.
		 */
		do {
			groupsize = g.end - g.start;

			/*
			 * Keep track of the last "end" index that causes this
			 * group to align with a group of changed lines in the
			 * other file. -1 indicates that we haven't found such
			 * a match yet:
			 */
			end_matching_other = -1;

			/* Shift the group backward as much as possible: */
			while (!group_slide_up(xdf, &g, flags))
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding up");

			/*
			 * This is this highest that this group can be shifted.
			 * Record its end index:
			 */
			earliest_end = g.end;

			if (go.end > go.start)
				end_matching_other = g.end;

			/* Now shift the group forward as far as possible: */
			while (1) {
				if (group_slide_down(xdf, &g, flags))
					break;
				if (group_next(xdfo, &go))
					xdl_bug("group sync broken sliding down");

				if (go.end > go.start)
					end_matching_other = g.end;
			}
		} while (groupsize != g.end - g.start);

		/*
		 * If the group can be shifted, then we can possibly use this
		 * freedom to produce a more intuitive diff.
		 *
		 * The group is currently shifted as far down as possible, so
		 * the heuristics below only have to handle upwards shifts.
		 */

		if (g.end == earliest_end) {
			/* no shifting was possible */
		} else if (end_matching_other != -1) {
			/*
			 * Move the possibly merged group of changes back to
			 * line up with the last group of changes from the
			 * other file that it can align with.
			 */
			while (go.end == go.start) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("match disappeared");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to match");
			}
		} else if (flags & XDF_INDENT_HEURISTIC) {
			/*
			 * Indent heuristic: a group of pure add/delete lines
			 * implies two splits, one between the end of the
			 * "before" context and the start of the group, and
			 * another between the end of the group and the
			 * beginning of the "after" context. Some splits are
			 * aesthetically better and some are worse. We compute
			 * a badness "score" for each split, and add the scores
			 * for the two splits to define a "score" for each
			 * position that the group can be shifted to. Then we
			 * pick the shift with the lowest score.
			 */
			long shift, best_shift = -1;
			struct split_score best_score;

			/*
			 * Candidate range: from the highest reachable shift
			 * (also bounded so the group start stays valid and the
			 * scan stays within INDENT_HEURISTIC_MAX_SLIDING)
			 * down to the current, lowest position.
			 */
			shift = earliest_end;
			if (g.end - groupsize - 1 > shift)
				shift = g.end - groupsize - 1;
			if (g.end - INDENT_HEURISTIC_MAX_SLIDING > shift)
				shift = g.end - INDENT_HEURISTIC_MAX_SLIDING;
			for (; shift <= g.end; shift++) {
				struct split_measurement m;
				struct split_score score = {0, 0};

				/* Score both splits this shift would create. */
				measure_split(xdf, shift, &m);
				score_add_split(&m, &score);
				measure_split(xdf, shift - groupsize, &m);
				score_add_split(&m, &score);

				/* "<=" prefers the last (lowest) tied shift. */
				if (best_shift == -1 ||
				    score_cmp(&score, &best_score) <= 0) {
					best_score.effective_indent = score.effective_indent;
					best_score.penalty = score.penalty;
					best_shift = shift;
				}
			}

			/* Slide the group up to the chosen best position. */
			while (g.end > best_shift) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("best shift unreached");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to blank line");
			}
		}

	next:
		/* Move past the just-processed group: */
		if (group_next(xdf, &g))
			break;
		if (group_next(xdfo, &go))
			xdl_bug("group sync broken moving to next group");
	}

	/* Both cursors must reach end-of-file together. */
	if (!group_next(xdfo, &go))
		xdl_bug("group sync broken at end of file");

	return 0;
}