int dns_server_new(
                Manager *m,
                DnsServer **ret,
                DnsServerType type,
                Link *l,
                int family,
                const union in_addr_union *in_addr) {

        DnsServer *s, *tail;

        assert(m);
        assert((type == DNS_SERVER_LINK) == !!l);
        assert(in_addr);

        s = new0(DnsServer, 1);
        if (!s)
                return -ENOMEM;

        s->n_ref = 1;
        s->type = type;
        s->family = family;
        s->address = *in_addr;
        s->resend_timeout = DNS_TIMEOUT_MIN_USEC;

        if (type == DNS_SERVER_LINK) {
                LIST_FIND_TAIL(servers, l->dns_servers, tail);
                LIST_INSERT_AFTER(servers, l->dns_servers, tail, s);
                s->link = l;
        } else if (type == DNS_SERVER_SYSTEM) {
                LIST_FIND_TAIL(servers, m->dns_servers, tail);
                LIST_INSERT_AFTER(servers, m->dns_servers, tail, s);
        } else if (type == DNS_SERVER_FALLBACK) {
                LIST_FIND_TAIL(servers, m->fallback_dns_servers, tail);
                LIST_INSERT_AFTER(servers, m->fallback_dns_servers, tail, s);
        } else
                assert_not_reached("Unknown server type");

        s->manager = m;

        /* A new DNS server that isn't fallback is added and the one
         * we used so far was a fallback one? Then let's try to pick
         * the new one */
        if (type != DNS_SERVER_FALLBACK &&
            m->current_dns_server &&
            m->current_dns_server->type == DNS_SERVER_FALLBACK)
                manager_set_dns_server(m, NULL);

        if (ret)
                *ret = s;

        return 0;
}
int server_name_new(
                Manager *m,
                ServerName **ret,
                ServerType type,
                const char *string) {

        ServerName *n, *tail;

        assert(m);
        assert(string);

        n = new0(ServerName, 1);
        if (!n)
                return -ENOMEM;

        n->type = type;
        n->string = strdup(string);
        if (!n->string) {
                free(n);
                return -ENOMEM;
        }

        if (type == SERVER_SYSTEM) {
                LIST_FIND_TAIL(names, m->system_servers, tail);
                LIST_INSERT_AFTER(names, m->system_servers, tail, n);
        } else if (type == SERVER_LINK) {
                LIST_FIND_TAIL(names, m->link_servers, tail);
                LIST_INSERT_AFTER(names, m->link_servers, tail, n);
        } else if (type == SERVER_FALLBACK) {
                LIST_FIND_TAIL(names, m->fallback_servers, tail);
                LIST_INSERT_AFTER(names, m->fallback_servers, tail, n);
        } else
                assert_not_reached("Unknown server type");

        n->manager = m;

        if (type != SERVER_FALLBACK &&
            m->current_server_name &&
            m->current_server_name->type == SERVER_FALLBACK)
                manager_set_server_name(m, NULL);

        log_debug("Added new server %s.", string);

        if (ret)
                *ret = n;

        return 0;
}
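The two systemd-style helpers above append with LIST_FIND_TAIL plus a LIST_INSERT_AFTER that takes the list name and the list head as extra arguments. Most of the remaining examples use BSD <sys/queue.h>, whose LIST_INSERT_AFTER(listelm, elm, field) takes no head argument and whose LIST keeps no tail pointer, so appending means walking to the last element first. Here is a minimal, self-contained sketch of that append-at-tail pattern with queue(3); the node type and helper names are illustrative, not taken from any of the sources in this collection:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node {
	int value;
	LIST_ENTRY(node) link;
};

LIST_HEAD(node_list, node);

/* Append a node at the tail: O(n), like LIST_FIND_TAIL above. */
static struct node *append_node(struct node_list *head, int value)
{
	struct node *n, *tail;

	n = calloc(1, sizeof(*n));
	if (n == NULL)
		return NULL;
	n->value = value;

	if (LIST_EMPTY(head)) {
		LIST_INSERT_HEAD(head, n, link);
	} else {
		/* Walk to the last element, then hook ourselves after it. */
		for (tail = LIST_FIRST(head); LIST_NEXT(tail, link) != NULL;
		    tail = LIST_NEXT(tail, link))
			;
		LIST_INSERT_AFTER(tail, n, link);
	}
	return n;
}

int main(void)
{
	struct node_list head = LIST_HEAD_INITIALIZER(head);
	struct node *np;

	for (int i = 0; i < 5; i++)
		append_node(&head, i);
	LIST_FOREACH(np, &head, link)
		printf("%d\n", np->value);	/* prints 0..4 in order */
	return 0;
}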
/*
 * vs_msgsave --
 *	Save a message for later display.
 */
static void
vs_msgsave(SCR *sp, mtype_t mt, char *p, size_t len)
{
	GS *gp;
	MSGS *mp_c, *mp_n;

	/*
	 * We have to handle messages before we have any place to put them.
	 * If there's no screen support yet, allocate a msg structure, copy
	 * in the message, and queue it on the global structure.  If we can't
	 * allocate memory here, we're genuinely screwed, dump the message
	 * to stderr in the (probably) vain hope that someone will see it.
	 */
	CALLOC_GOTO(sp, mp_n, MSGS *, 1, sizeof(MSGS));
	MALLOC_GOTO(sp, mp_n->buf, char *, len);

	memmove(mp_n->buf, p, len);
	mp_n->len = len;
	mp_n->mtype = mt;

	gp = sp->gp;
	if ((mp_c = gp->msgq.lh_first) == NULL) {
		LIST_INSERT_HEAD(&gp->msgq, mp_n, q);
	} else {
		for (; mp_c->q.le_next != NULL; mp_c = mp_c->q.le_next);
		LIST_INSERT_AFTER(mp_c, mp_n, q);
	}
	return;

alloc_err:
	if (mp_n != NULL)
		free(mp_n);
	(void)fprintf(stderr, "%.*s\n", (int)len, p);
}
int server_address_new(
                ServerName *n,
                ServerAddress **ret,
                const union sockaddr_union *sockaddr,
                socklen_t socklen) {

        ServerAddress *a, *tail;

        assert(n);
        assert(sockaddr);
        assert(socklen >= offsetof(struct sockaddr, sa_data));
        assert(socklen <= sizeof(union sockaddr_union));

        a = new0(ServerAddress, 1);
        if (!a)
                return -ENOMEM;

        memcpy(&a->sockaddr, sockaddr, socklen);
        a->socklen = socklen;

        LIST_FIND_TAIL(addresses, n->addresses, tail);
        LIST_INSERT_AFTER(addresses, n->addresses, tail, a);
        a->name = n;

        if (ret)
                *ret = a;

        return 0;
}
void
drm_gem_names_foreach(struct drm_gem_names *names,
    int (*f)(uint32_t, void *, void *), void *arg)
{
	struct drm_gem_name *np;
	struct drm_gem_name marker;
	int i, fres;

	bzero(&marker, sizeof(marker));
	marker.name = -1;
	mtx_lock(&names->lock);
	for (i = 0; i <= names->hash_mask; i++) {
		for (np = LIST_FIRST(&names->names_hash[i]); np != NULL; ) {
			if (np->name == -1) {
				np = LIST_NEXT(np, link);
				continue;
			}
			LIST_INSERT_AFTER(np, &marker, link);
			mtx_unlock(&names->lock);
			fres = f(np->name, np->ptr, arg);
			mtx_lock(&names->lock);
			np = LIST_NEXT(&marker, link);
			LIST_REMOVE(&marker, link);
			if (fres)
				break;
		}
	}
	mtx_unlock(&names->lock);
}
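drm_gem_names_foreach() above shows the marker-node idiom: a dummy entry (name == -1) is linked in after the current node before the lock is dropped, so the walk can resume from the marker even if the visited node disappears while the callback runs; the traversal skips other markers it encounters. Below is a minimal, self-contained sketch of that idiom with pthreads. All names in it are illustrative, and the payload is copied before unlocking so the callback never touches a node another thread may have freed:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct item {
	int value;			/* -1 marks a marker node */
	LIST_ENTRY(item) link;
};

static LIST_HEAD(, item) items = LIST_HEAD_INITIALIZER(items);
static pthread_mutex_t items_lock = PTHREAD_MUTEX_INITIALIZER;

/* Visit every real item, dropping the lock around the callback. */
static void foreach_item(int (*f)(int))
{
	struct item marker = { .value = -1 }, *np;
	int v, fres;

	pthread_mutex_lock(&items_lock);
	for (np = LIST_FIRST(&items); np != NULL; ) {
		if (np->value == -1) {		/* skip other markers */
			np = LIST_NEXT(np, link);
			continue;
		}
		v = np->value;			/* copy payload while locked */
		/* The marker pins our position while the lock is gone. */
		LIST_INSERT_AFTER(np, &marker, link);
		pthread_mutex_unlock(&items_lock);
		fres = f(v);
		pthread_mutex_lock(&items_lock);
		np = LIST_NEXT(&marker, link);
		LIST_REMOVE(&marker, link);
		if (fres)
			break;
	}
	pthread_mutex_unlock(&items_lock);
}

static int print_item(int v) { printf("%d\n", v); return 0; }

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct item *it = calloc(1, sizeof(*it));
		it->value = i + 1;
		LIST_INSERT_HEAD(&items, it, link);
	}
	foreach_item(print_item);
	return 0;
}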
/*
 * Find the correct place to insert the prefix in the prefix list.
 * If the active prefix has changed we need to send an update.
 * The prefix to evaluate must not be in the prefix list.
 */
void
prefix_evaluate(struct prefix *p, struct rib_entry *re)
{
	struct prefix *xp;

	if (re->flags & F_RIB_NOEVALUATE || rde_noevaluate()) {
		/* decision process is turned off */
		if (p != NULL)
			LIST_INSERT_HEAD(&re->prefix_h, p, rib_l);
		if (re->active != NULL) {
			re->active->aspath->active_cnt--;
			re->active = NULL;
		}
		return;
	}

	if (p != NULL) {
		if (LIST_EMPTY(&re->prefix_h))
			LIST_INSERT_HEAD(&re->prefix_h, p, rib_l);
		else {
			LIST_FOREACH(xp, &re->prefix_h, rib_l)
				if (prefix_cmp(p, xp) > 0) {
					LIST_INSERT_BEFORE(xp, p, rib_l);
					break;
				} else if (LIST_NEXT(xp, rib_l) == NULL) {
					/* if xp last element ... */
					LIST_INSERT_AFTER(xp, p, rib_l);
					break;
				}
		}
	}

	xp = LIST_FIRST(&re->prefix_h);
	if (xp == NULL || xp->aspath->flags & F_ATTR_LOOP ||
	    (xp->aspath->nexthop != NULL &&
	    xp->aspath->nexthop->state != NEXTHOP_REACH))
		/* xp is ineligible */
		xp = NULL;

	if (re->active != xp) {
		/* need to generate an update */
		if (re->active != NULL)
			re->active->aspath->active_cnt--;

		/*
		 * Send update with remove for re->active and add for xp
		 * but remember that xp may be NULL aka ineligible.
		 * Additional decision may be made by the called functions.
		 */
		rde_generate_updates(re->ribid, xp, re->active);
		if ((re->flags & F_RIB_NOFIB) == 0)
			rde_send_kroute(xp, re->active);

		re->active = xp;
		if (xp != NULL)
			xp->aspath->active_cnt++;
	}
}
/*
 * Set up the given timer.  The value in pt->pt_time.it_value is taken
 * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and
 * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers.
 */
void
timer_settime(struct ptimer *pt)
{
	struct ptimer *ptn, *pptn;
	struct ptlist *ptl;

	KASSERT(mutex_owned(&timer_lock));

	if (!CLOCK_VIRTUAL_P(pt->pt_type)) {
		callout_halt(&pt->pt_ch, &timer_lock);
		if (timespecisset(&pt->pt_time.it_value)) {
			/*
			 * Don't need to check tshzto() return value, here.
			 * callout_reset() does it for us.
			 */
			callout_reset(&pt->pt_ch,
			    pt->pt_type == CLOCK_MONOTONIC ?
			    tshztoup(&pt->pt_time.it_value) :
			    tshzto(&pt->pt_time.it_value),
			    realtimerexpire, pt);
		}
	} else {
		if (pt->pt_active) {
			ptn = LIST_NEXT(pt, pt_list);
			LIST_REMOVE(pt, pt_list);
			for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list))
				timespecadd(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &ptn->pt_time.it_value);
		}
		if (timespecisset(&pt->pt_time.it_value)) {
			if (pt->pt_type == CLOCK_VIRTUAL)
				ptl = &pt->pt_proc->p_timers->pts_virtual;
			else
				ptl = &pt->pt_proc->p_timers->pts_prof;

			for (ptn = LIST_FIRST(ptl), pptn = NULL;
			    ptn && timespeccmp(&pt->pt_time.it_value,
				&ptn->pt_time.it_value, >);
			    pptn = ptn, ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&pt->pt_time.it_value,
				    &ptn->pt_time.it_value,
				    &pt->pt_time.it_value);

			if (pptn)
				LIST_INSERT_AFTER(pptn, pt, pt_list);
			else
				LIST_INSERT_HEAD(ptl, pt, pt_list);

			for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list))
				timespecsub(&ptn->pt_time.it_value,
				    &pt->pt_time.it_value,
				    &ptn->pt_time.it_value);

			pt->pt_active = 1;
		} else
			pt->pt_active = 0;
	}
}
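timer_settime() keeps each process's virtual-timer queue as a differential list: every entry stores only the time delta to its predecessor, so aging all timers means decrementing just the head entry, and insertion converts an absolute expiry into a delta while walking the list, then re-expresses the successors relative to the new entry. A minimal, self-contained sketch of that bookkeeping with integer ticks follows (all names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct timer {
	int delta;			/* ticks relative to the previous entry */
	LIST_ENTRY(timer) link;
};

static LIST_HEAD(, timer) timers = LIST_HEAD_INITIALIZER(timers);

/* Insert a timer that fires `ticks` ticks from now. */
static void timer_insert(int ticks)
{
	struct timer *t, *tn, *prev = NULL;

	t = malloc(sizeof(*t));
	if (t == NULL)
		return;

	/* Walk past entries that fire earlier, converting `ticks` to a
	 * delta as we go (mirrors the first timespecsub loop above). */
	for (tn = LIST_FIRST(&timers);
	    tn != NULL && ticks > tn->delta;
	    prev = tn, tn = LIST_NEXT(tn, link))
		ticks -= tn->delta;

	t->delta = ticks;
	if (prev)
		LIST_INSERT_AFTER(prev, t, link);
	else
		LIST_INSERT_HEAD(&timers, t, link);

	/* Every later entry now fires relative to us
	 * (mirrors the second timespecsub loop above). */
	for (; tn != NULL; tn = LIST_NEXT(tn, link))
		tn->delta -= t->delta;
}

int main(void)
{
	struct timer *t;
	int abs = 0;

	timer_insert(30);
	timer_insert(10);
	timer_insert(20);
	LIST_FOREACH(t, &timers, link) {
		abs += t->delta;
		printf("delta %d (fires at %d)\n", t->delta, abs);
	}
	return 0;
}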
void dns_server_move_back_and_unmark(DnsServer *s) {
        DnsServer *tail;

        assert(s);

        if (!s->marked)
                return;

        s->marked = false;

        if (!s->linked || !s->servers_next)
                return;

        /* Move us to the end of the list, so that the order is
         * strictly kept, if we are not at the end anyway. */

        switch (s->type) {

        case DNS_SERVER_LINK:
                assert(s->link);
                LIST_FIND_TAIL(servers, s, tail);
                LIST_REMOVE(servers, s->link->dns_servers, s);
                LIST_INSERT_AFTER(servers, s->link->dns_servers, tail, s);
                break;

        case DNS_SERVER_SYSTEM:
                LIST_FIND_TAIL(servers, s, tail);
                LIST_REMOVE(servers, s->manager->dns_servers, s);
                LIST_INSERT_AFTER(servers, s->manager->dns_servers, tail, s);
                break;

        case DNS_SERVER_FALLBACK:
                LIST_FIND_TAIL(servers, s, tail);
                LIST_REMOVE(servers, s->manager->fallback_dns_servers, s);
                LIST_INSERT_AFTER(servers, s->manager->fallback_dns_servers, tail, s);
                break;

        default:
                assert_not_reached("Unknown server type");
        }
}
/**
 * void gen_page_refs()
 *
 * Generate all page refs to use in tests
 */
void gen_page_refs()
{
    num_refs = 0;
    LIST_INIT(&page_refs);
    Page_Ref *page = gen_ref();
    LIST_INSERT_HEAD(&page_refs, page, pages);
    while (num_refs < max_page_calls) {
        // generate a page ref up to max_page_calls and add to list;
        // hoist gen_ref() into a local because the queue(3)
        // LIST_INSERT_AFTER macro expands its argument several times
        Page_Ref *next = gen_ref();
        LIST_INSERT_AFTER(page, next, pages);
        page = next;
        num_refs++;
    }
    // we need look-ahead for Optimal algorithm
    int all_found = 0;
    optimum_find_test = (int *)malloc(page_ref_upper_bound * sizeof(int));
    size_t i;
    for (i = 0; i < page_ref_upper_bound; ++i) {
        // mark every page number as not yet seen
        optimum_find_test[i] = -1;
    }
    while (all_found == 0) {
        // generate new refs until one of each has been added to the list
        Page_Ref *next = gen_ref();
        LIST_INSERT_AFTER(page, next, pages);
        page = next;
        optimum_find_test[page->page_num] = 1;
        all_found = 1;
        for (i = 0; i < page_ref_upper_bound; ++i) {
            // see if we've got them all yet
            if (optimum_find_test[i] == -1) {
                all_found = 0;
                break;
            }
        }
        num_refs++;
    }
    return;
}
static int
eisa_add_resvaddr(struct eisa_device *e_dev, struct resvlist *head,
    u_long base, u_long size, int flags)
{
	resvaddr_t *reservation;

	reservation = (resvaddr_t *)malloc(sizeof(resvaddr_t),
	    M_DEVBUF, M_NOWAIT);
	if (!reservation)
		return (ENOMEM);

	reservation->addr = base;
	reservation->size = size;
	reservation->flags = flags;

	if (!LIST_FIRST(head)) {
		LIST_INSERT_HEAD(head, reservation, links);
	} else {
		resvaddr_t *node;
		LIST_FOREACH(node, head, links) {
			if (node->addr > reservation->addr) {
				/*
				 * List is sorted in increasing
				 * address order.
				 */
				LIST_INSERT_BEFORE(node, reservation, links);
				break;
			}

			if (node->addr == reservation->addr) {
				/*
				 * If the entry we want to add
				 * matches any already in here,
				 * fail.
				 */
				free(reservation, M_DEVBUF);
				return (EEXIST);
			}

			if (!LIST_NEXT(node, links)) {
				LIST_INSERT_AFTER(node, reservation, links);
				break;
			}
		}
	}
	return (0);
}
/*
 * To prevent an additional free/malloc we recycle the allocated memory.
 *
 * First remove it from its current state, causing the next block to 'fall'
 * into place.  Then we randomize it (WHICH WIPES ALL DATA, INCLUDING PREVIOUS
 * POINTERS) and reinstall it at the end of the list.
 */
static void update_cur_block(void)
{
	struct blocks *last, *np = CURRENT_BLOCK();

	LIST_REMOVE(np, entries);
	randomize_block(np);

	/* Find last block in list */
	for (last = FIRST_NEXT_BLOCK(); last && last->entries.le_next;
	     last = last->entries.le_next)
		;

	LIST_INSERT_AFTER(last, np, entries);
}
/*
 * Insert a new element into an existing list so that the IDs (the size
 * field in struct pci_memreg) stay sorted.
 */
static void
insert_into_list(PCI_MEMREG *head, struct pci_memreg *elem)
{
	struct pci_memreg *p, *q;

	p = LIST_FIRST(head);
	q = NULL;
	for (; p != NULL && p->size < elem->size; q = p, p = LIST_NEXT(p, link));

	if (q == NULL) {
		LIST_INSERT_HEAD(head, elem, link);
	} else {
		LIST_INSERT_AFTER(q, elem, link);
	}
}
//
// Mark all environments in 'envs' as free, set their env_ids to 0,
// and insert them into the env_free_list.
// Insert in reverse order, so that the first call to env_alloc()
// returns envs[0].
//
void
env_init(void)
{
	// LAB 3: Your code here.
	// sunus, DEC 6, 2010
	int i;
	struct Env *pcheck;

	envs[0].env_status = ENV_FREE;
	envs[0].env_id = 0;
	LIST_INIT(&env_free_list);
	/* LEAVE IT THERE, I DON'T KNOW IF IT WAS RIGHT -- sunus
	for (i = NENV - 1; i >= 0; i--) {
		envs[i].env_status = ENV_FREE;
		envs[i].env_id = 0;
		LIST_INSERT_HEAD(&env_free_list, &envs[i], env_link);
	}
	*/
	LIST_INSERT_HEAD(&env_free_list, &envs[0], env_link);
	for (i = 1; i < NENV; i++) {
		envs[i].env_status = ENV_FREE;
		envs[i].env_id = 0;
		LIST_INSERT_AFTER(&envs[i - 1], &envs[i], env_link);
	}
#if S_DEBUG_ENV_INIT
	cprintf("env test begins\n");
	/* check to see if the linked list is all set */
	for (i = 0; i < NENV; i++)
		envs[i].env_id = i;
	pcheck = LIST_FIRST(&env_free_list);
	for (i = 0; i < NENV; i++) {
		cprintf("env[%d]'s id is %d\n", i, pcheck->env_id);
		pcheck = (pcheck->env_link).le_next;
	}
	for (i = 0; i < 1024; i += 4) {
		cprintf("pgdir[%04d] : %08x %08x %08x %08x\n", i,
		    boot_pgdir[i], boot_pgdir[i+1],
		    boot_pgdir[i+2], boot_pgdir[i+3]);
	}
#endif
	// sunus, DEC 6, 2010
}
/**
 * Algorithm_Data* create_algo_data_store(int num_frames)
 *
 * Creates an empty Algorithm_Data to init an Algorithm
 *
 * @return {Algorithm_Data*} empty Algorithm_Data struct for an Algorithm
 */
Algorithm_Data *create_algo_data_store(int num_frames)
{
    Algorithm_Data *data = malloc(sizeof(Algorithm_Data));
    data->hits = 0;
    data->misses = 0;
    data->last_victim = NULL;
    /* Initialize Lists */
    LIST_INIT(&(data->page_table));
    LIST_INIT(&(data->victim_list));
    /* Insert at the page_table. */
    Frame *framep = create_empty_frame(0);
    LIST_INSERT_HEAD(&(data->page_table), framep, frames);
    /* Build the rest of the list; hoist the call into a local because
     * the queue(3) LIST_INSERT_AFTER macro expands its argument more
     * than once. */
    size_t i = 0;
    for (i = 1; i < num_frames; ++i) {
        Frame *next = create_empty_frame(i);
        LIST_INSERT_AFTER(framep, next, frames);
        framep = next;
    }
    return data;
}
void backend_event_loop_insert_timeout(struct backend_event_loop *del,
                                       struct backend_timeout_handle *handle)
{
    struct backend_timeout_handle *itr = del->timeout_list.lh_first, *prev_itr;

    if (itr == NULL ||
        handle->timeout_clock < itr->timeout_clock) {
        LIST_INSERT_HEAD(&(del->timeout_list), handle, timeout_next);
        return;
    }

    //Move to a separate insert function
    for (; itr != NULL; itr = itr->timeout_next.le_next) {
        if (handle->timeout_clock < itr->timeout_clock)
            break;

        prev_itr = itr;
    }

    LIST_INSERT_AFTER(prev_itr, handle, timeout_next);
}
/*
 * Insert a dev_data into the provided list, sorted by select code.
 */
static void
dev_data_insert(struct dev_data *dd, ddlist_t *ddlist)
{
	struct dev_data *de;

#ifdef DIAGNOSTIC
	if (dd->dd_scode < 0 || dd->dd_scode > 255) {
		printf("bogus select code for %s\n", dd->dd_dev->dv_xname);
		panic("dev_data_insert");
	}
#endif

	de = LIST_FIRST(ddlist);

	/*
	 * Just insert at head if list is empty.
	 */
	if (de == NULL) {
		LIST_INSERT_HEAD(ddlist, dd, dd_clist);
		return;
	}

	/*
	 * Traverse the list looking for a device whose select code
	 * is greater than ours.  When we find it, insert ourselves
	 * into the list before it.
	 */
	for (; LIST_NEXT(de, dd_clist) != NULL; de = LIST_NEXT(de, dd_clist)) {
		if (de->dd_scode > dd->dd_scode) {
			LIST_INSERT_BEFORE(de, dd, dd_clist);
			return;
		}
	}

	/*
	 * Our select code is greater than everyone else's.  We go
	 * onto the end.
	 */
	LIST_INSERT_AFTER(de, dd, dd_clist);
}
static int read_processes_settings(void *settings, void *data)
{
	struct swupdate_cfg *sw = (struct swupdate_cfg *)data;
	void *elem;
	int count, i;
	struct extproc *proc, *last = NULL;

	count = get_array_length(LIBCFG_PARSER, settings);

	for (i = 0; i < count; ++i) {
		elem = get_elem_from_idx(LIBCFG_PARSER, settings, i);

		if (!elem)
			continue;

		if (!(exist_field_string(LIBCFG_PARSER, elem, "name")))
			continue;
		if (!(exist_field_string(LIBCFG_PARSER, elem, "exec")))
			continue;

		proc = (struct extproc *)calloc(1, sizeof(struct extproc));

		GET_FIELD_STRING(LIBCFG_PARSER, elem, "name", proc->name);
		GET_FIELD_STRING(LIBCFG_PARSER, elem, "exec", proc->exec);

		if (!last)
			LIST_INSERT_HEAD(&sw->extprocs, proc, next);
		else
			LIST_INSERT_AFTER(last, proc, next);

		last = proc;

		TRACE("External process \"%s\": \"%s %s\" will be started",
		      proc->name, proc->exec, proc->options);
	}

	return 0;
}
int main(int argc, char *argv[])
{
    LIST_INIT(&head);

    int i;
    for (i = 0; i < 10; i++) {
        all_data[i].data = i;
        LIST_INSERT_HEAD(&head, &all_data[i], list);
    }

    for (; i < 20; i++) {
        all_data[i].data = i;
        LIST_INSERT_AFTER(&all_data[2], &all_data[i], list);
    }

    mydata *p;
    for (p = head.lh_first; p != NULL; p = p->list.le_next) {
        printf("data = %d\n", p->data);
    }

    return 0;
}
/*
 * mark_set --
 *	Set the location referenced by a mark.
 *
 * PUBLIC: int mark_set __P((SCR *, ARG_CHAR_T, MARK *, int));
 */
int
mark_set(SCR *sp, ARG_CHAR_T key, MARK *value, int userset)
{
	LMARK *lmp, *lmt;

	if (key == ABSMARK2)
		key = ABSMARK1;

	/*
	 * The rules are simple.  If the user is setting a mark (if it's a
	 * new mark this is always true), it always happens.  If not, it's
	 * an undo, and we set it if it's not already set or if it was set
	 * by a previous undo.
	 */
	lmp = mark_find(sp, key);
	if (lmp == NULL || (ARG_CHAR_T)lmp->name != key) {
		MALLOC_RET(sp, lmt, LMARK *, sizeof(LMARK));
		if (lmp == NULL) {
			LIST_INSERT_HEAD(&sp->ep->marks, lmt, q);
		} else
			LIST_INSERT_AFTER(lmp, lmt, q);
		lmp = lmt;
	} else if (!userset &&
void
MCAddMsg(int msgId, const char *str)
{
	struct _msgT *p, *q;

	if (!curSet)
		error("can't specify a message when no set exists");

	if (msgId <= 0) {
		error("msgId's must be greater than zero");
		/* NOTREACHED */
	}
	if (msgId > NL_MSGMAX) {
		error("msgID exceeds limit");
		/* NOTREACHED */
	}

	p = curSet->msghead.lh_first;
	q = NULL;
	for (; p != NULL && p->msgId < msgId; q = p, p = p->entries.le_next);

	if (p && p->msgId == msgId) {
		free(p->str);
	} else {
		p = xmalloc(sizeof(struct _msgT));
		memset(p, '\0', sizeof(struct _msgT));

		if (q == NULL) {
			LIST_INSERT_HEAD(&curSet->msghead, p, entries);
		} else {
			LIST_INSERT_AFTER(q, p, entries);
		}
	}

	p->msgId = msgId;
	p->str = xstrdup(str);
}
void
MCAddSet(int setId)
{
	struct _setT *p, *q;

	if (setId <= 0) {
		error("setId's must be greater than zero");
		/* NOTREACHED */
	}
	if (setId > NL_SETMAX) {
		error("setId exceeds limit");
		/* NOTREACHED */
	}

	p = sethead.lh_first;
	q = NULL;
	for (; p != NULL && p->setId < setId; q = p, p = p->entries.le_next);

	if (p && p->setId == setId) {
		;
	} else {
		p = xmalloc(sizeof(struct _setT));
		memset(p, '\0', sizeof(struct _setT));
		LIST_INIT(&p->msghead);
		p->setId = setId;

		if (q == NULL) {
			LIST_INSERT_HEAD(&sethead, p, entries);
		} else {
			LIST_INSERT_AFTER(q, p, entries);
		}
	}

	curSet = p;
}
int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
	struct drm_hash_item *entry, *parent;
	struct drm_hash_item_list *h_list;
	unsigned int hashed_key;
	unsigned long key = item->key;

	hashed_key = hash32_buf(&key, sizeof(key), ht->order);
	h_list = &ht->table[hashed_key & ht->mask];
	parent = NULL;
	LIST_FOREACH(entry, h_list, head) {
		if (entry->key == key)
			return -EINVAL;
		if (entry->key > key)
			break;
		parent = entry;
	}
	if (parent) {
		LIST_INSERT_AFTER(parent, item, head);
	} else {
		LIST_INSERT_HEAD(h_list, item, head);
	}
	return 0;
}
static int
add_entry(struct ip_fw_head *chainptr, struct ip_fw *frwl)
{
	struct ip_fw *ftmp = 0;
	struct ip_fw_chain *fwc = 0, *fcp, *fcpl = 0;
	u_short nbr = 0;
	int s;

	fwc = malloc(sizeof *fwc, M_IPFW, M_DONTWAIT);
	ftmp = malloc(sizeof *ftmp, M_IPFW, M_DONTWAIT);
	if (!fwc || !ftmp) {
		dprintf(("%s malloc said no\n", err_prefix));
		if (fwc)
			free(fwc, M_IPFW);
		if (ftmp)
			free(ftmp, M_IPFW);
		return (ENOSPC);
	}

	bcopy(frwl, ftmp, sizeof(struct ip_fw));
	ftmp->fw_in_if.fu_via_if.name[FW_IFNLEN - 1] = '\0';
	ftmp->fw_pcnt = 0L;
	ftmp->fw_bcnt = 0L;
	fwc->rule = ftmp;

	s = splnet();

	if (!chainptr->lh_first) {
		LIST_INSERT_HEAD(chainptr, fwc, chain);
		splx(s);
		return (0);
	} else if (ftmp->fw_number == (u_short)-1) {
		if (fwc)
			free(fwc, M_IPFW);
		if (ftmp)
			free(ftmp, M_IPFW);
		splx(s);
		dprintf(("%s bad rule number\n", err_prefix));
		return (EINVAL);
	}

	/* If entry number is 0, find highest numbered rule and add 100 */
	if (ftmp->fw_number == 0) {
		for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) {
			if (fcp->rule->fw_number != (u_short)-1)
				nbr = fcp->rule->fw_number;
			else
				break;
		}
		if (nbr < (u_short)-1 - 100)
			nbr += 100;
		ftmp->fw_number = nbr;
	}

	/* Got a valid number; now insert it, keeping the list ordered */
	for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) {
		if (fcp->rule->fw_number > ftmp->fw_number) {
			if (fcpl) {
				LIST_INSERT_AFTER(fcpl, fwc, chain);
			} else {
				LIST_INSERT_HEAD(chainptr, fwc, chain);
			}
			break;
		} else {
			fcpl = fcp;
		}
	}

	splx(s);
	return (0);
}
int
udp_input(struct mbuf **mp, int *offp, int proto)
{
	struct sockaddr_in udp_in = { sizeof udp_in, AF_INET };
	int iphlen;
	struct ip *ip;
	struct udphdr *uh;
	struct inpcb *inp;
	struct mbuf *m;
	struct mbuf *opts = NULL;
	int len, off;
	struct ip save_ip;
	struct inpcbinfo *pcbinfo = &udbinfo[mycpuid];

	off = *offp;
	m = *mp;
	*mp = NULL;

	iphlen = off;
	udp_stat.udps_ipackets++;

	/*
	 * Strip IP options, if any; should skip this,
	 * make available to user, and use on returned packets,
	 * but we don't yet have a way to check the checksum
	 * with options still present.
	 */
	if (iphlen > sizeof(struct ip)) {
		ip_stripoptions(m);
		iphlen = sizeof(struct ip);
	}

	/*
	 * IP and UDP headers are together in first mbuf.
	 * Already checked and pulled up in ip_demux().
	 */
	KASSERT(m->m_len >= iphlen + sizeof(struct udphdr),
	    ("UDP header not in one mbuf"));
	ip = mtod(m, struct ip *);
	uh = (struct udphdr *)((caddr_t)ip + iphlen);

	/* destination port of 0 is illegal, based on RFC768. */
	if (uh->uh_dport == 0)
		goto bad;

	/*
	 * Make mbuf data length reflect UDP length.
	 * If not enough data to reflect UDP length, drop.
	 */
	len = ntohs((u_short)uh->uh_ulen);
	if (ip->ip_len != len) {
		if (len > ip->ip_len || len < sizeof(struct udphdr)) {
			udp_stat.udps_badlen++;
			goto bad;
		}
		m_adj(m, len - ip->ip_len);
		/* ip->ip_len = len; */
	}

	/*
	 * Save a copy of the IP header in case we want restore it
	 * for sending an ICMP error message in response.
	 */
	save_ip = *ip;

	/*
	 * Checksum extended UDP header and data.
	 */
	if (uh->uh_sum) {
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				uh->uh_sum = m->m_pkthdr.csum_data;
			else
				uh->uh_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htonl((u_short)len +
				    m->m_pkthdr.csum_data + IPPROTO_UDP));
			uh->uh_sum ^= 0xffff;
		} else {
			char b[9];

			bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
			bzero(((struct ipovly *)ip)->ih_x1, 9);
			((struct ipovly *)ip)->ih_len = uh->uh_ulen;
			uh->uh_sum = in_cksum(m, len + sizeof(struct ip));
			bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
		}
		if (uh->uh_sum) {
			udp_stat.udps_badsum++;
			m_freem(m);
			return (IPPROTO_DONE);
		}
	} else
		udp_stat.udps_nosum++;

	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
	    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
		struct inpcbhead *connhead;
		struct inpcontainer *ic, *ic_marker;
		struct inpcontainerhead *ichead;
		struct udp_mcast_arg arg;
		struct inpcb *last;
		int error;

		/*
		 * Deliver a multicast or broadcast datagram to *all* sockets
		 * for which the local and remote addresses and ports match
		 * those of the incoming datagram.  This allows more than
		 * one process to receive multi/broadcasts on the same port.
		 * (This really ought to be done for unicast datagrams as
		 * well, but that would cause problems with existing
		 * applications that open both address-specific sockets and
		 * a wildcard socket listening to the same port -- they would
		 * end up receiving duplicates of every unicast datagram.
		 * Those applications open the multiple sockets to overcome an
		 * inadequacy of the UDP socket interface, but for backwards
		 * compatibility we avoid the problem here rather than
		 * fixing the interface.  Maybe 4.5BSD will remedy this?)
		 */

		/*
		 * Construct sockaddr format source address.
		 */
		udp_in.sin_port = uh->uh_sport;
		udp_in.sin_addr = ip->ip_src;
		arg.udp_in = &udp_in;

		/*
		 * Locate pcb(s) for datagram.
		 * (Algorithm copied from raw_intr().)
		 */
		last = NULL;
		arg.iphlen = iphlen;

		connhead = &pcbinfo->hashbase[
		    INP_PCBCONNHASH(ip->ip_src.s_addr, uh->uh_sport,
		    ip->ip_dst.s_addr, uh->uh_dport, pcbinfo->hashmask)];
		LIST_FOREACH(inp, connhead, inp_hash) {
#ifdef INET6
			if (!INP_ISIPV4(inp))
				continue;
#endif
			if (!in_hosteq(inp->inp_faddr, ip->ip_src) ||
			    !in_hosteq(inp->inp_laddr, ip->ip_dst) ||
			    inp->inp_fport != uh->uh_sport ||
			    inp->inp_lport != uh->uh_dport)
				continue;

			arg.inp = inp;
			arg.last = last;
			arg.ip = ip;
			arg.m = m;

			error = udp_mcast_input(&arg);
			if (error == ERESTART)
				continue;
			last = arg.last;

			if (error == EJUSTRETURN)
				goto done;
		}

		ichead = &pcbinfo->wildcardhashbase[
		    INP_PCBWILDCARDHASH(uh->uh_dport,
		    pcbinfo->wildcardhashmask)];
		ic_marker = in_pcbcontainer_marker(mycpuid);

		GET_PCBINFO_TOKEN(pcbinfo);
		LIST_INSERT_HEAD(ichead, ic_marker, ic_list);
		while ((ic = LIST_NEXT(ic_marker, ic_list)) != NULL) {
			LIST_REMOVE(ic_marker, ic_list);
			LIST_INSERT_AFTER(ic, ic_marker, ic_list);

			inp = ic->ic_inp;
			if (inp->inp_flags & INP_PLACEMARKER)
				continue;
#ifdef INET6
			if (!INP_ISIPV4(inp))
				continue;
#endif
			if (inp->inp_lport != uh->uh_dport)
				continue;
			if (inp->inp_laddr.s_addr != INADDR_ANY &&
			    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
				continue;

			arg.inp = inp;
			arg.last = last;
			arg.ip = ip;
			arg.m = m;

			error = udp_mcast_input(&arg);
			if (error == ERESTART)
				continue;
			last = arg.last;

			if (error == EJUSTRETURN)
				break;
		}
		LIST_REMOVE(ic_marker, ic_list);
		REL_PCBINFO_TOKEN(pcbinfo);
done:
		if (last == NULL) {
			/*
			 * No matching pcb found; discard datagram.
			 * (No need to send an ICMP Port Unreachable
			 * for a broadcast or multicast datgram.)
			 */
			udp_stat.udps_noportbcast++;
			goto bad;
		}

#ifdef IPSEC
		/* check AH/ESP integrity. */
		if (ipsec4_in_reject_so(m, last->inp_socket)) {
			ipsecstat.in_polvio++;
			goto bad;
		}
#endif /*IPSEC*/
#ifdef FAST_IPSEC
		/* check AH/ESP integrity. */
		if (ipsec4_in_reject(m, last))
			goto bad;
#endif /*FAST_IPSEC*/
		udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
		    &udp_in);
		return (IPPROTO_DONE);
	}

	/*
	 * Locate pcb for datagram.
	 */
	inp = in_pcblookup_pkthash(pcbinfo, ip->ip_src, uh->uh_sport,
	    ip->ip_dst, uh->uh_dport, TRUE, m->m_pkthdr.rcvif,
	    udp_reuseport_ext ? m : NULL);
	if (inp == NULL) {
		if (log_in_vain) {
			char buf[sizeof "aaa.bbb.ccc.ddd"];

			strcpy(buf, inet_ntoa(ip->ip_dst));
			log(LOG_INFO,
			    "Connection attempt to UDP %s:%d from %s:%d\n",
			    buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
			    ntohs(uh->uh_sport));
		}
		udp_stat.udps_noport++;
		if (m->m_flags & (M_BCAST | M_MCAST)) {
			udp_stat.udps_noportbcast++;
			goto bad;
		}
		if (blackhole)
			goto bad;
#ifdef ICMP_BANDLIM
		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
			goto bad;
#endif
		*ip = save_ip;
		ip->ip_len += iphlen;
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
		return (IPPROTO_DONE);
	}
	KASSERT(INP_ISIPV4(inp), ("not inet inpcb"));
#ifdef IPSEC
	if (ipsec4_in_reject_so(m, inp->inp_socket)) {
		ipsecstat.in_polvio++;
		goto bad;
	}
#endif /*IPSEC*/
#ifdef FAST_IPSEC
	if (ipsec4_in_reject(m, inp))
		goto bad;
#endif /*FAST_IPSEC*/

	/*
	 * Check the minimum TTL for socket.
	 */
	if (ip->ip_ttl < inp->inp_ip_minttl)
		goto bad;

	/*
	 * Construct sockaddr format source address.
	 * Stuff source address and datagram in user buffer.
	 */
	udp_in.sin_port = uh->uh_sport;
	udp_in.sin_addr = ip->ip_src;

	if ((inp->inp_flags & INP_CONTROLOPTS) ||
	    (inp->inp_socket->so_options & SO_TIMESTAMP))
		ip_savecontrol(inp, &opts, ip, m);
	m_adj(m, iphlen + sizeof(struct udphdr));

	lwkt_gettoken(&inp->inp_socket->so_rcv.ssb_token);
	if (ssb_appendaddr(&inp->inp_socket->so_rcv,
	    (struct sockaddr *)&udp_in, m, opts) == 0) {
		lwkt_reltoken(&inp->inp_socket->so_rcv.ssb_token);
		udp_stat.udps_fullsock++;
		goto bad;
	}
	lwkt_reltoken(&inp->inp_socket->so_rcv.ssb_token);
	sorwakeup(inp->inp_socket);
	return (IPPROTO_DONE);
bad:
	m_freem(m);
	if (opts)
		m_freem(opts);
	return (IPPROTO_DONE);
}
struct intrhand *
intr_establish(int vector, int type, int pri, hw_ifun_t ih_fun, void *ih_arg)
{
	struct intrhand *ih, *cur_vec;
	ih_list_t *vec_list;
	u_long *hard_vec;
	int s;

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	/*
	 * Initialize vector info
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_type = type;
	ih->ih_pri = pri;
	ih->ih_vector = vector;

	/*
	 * Do some validity checking on the 'vector' argument and determine
	 * vector list this interrupt should be on.
	 */
	switch (type & (AUTO_VEC|USER_VEC)) {
	case AUTO_VEC:
		if (vector < AVEC_MIN || vector > AVEC_MAX) {
			free(ih, M_DEVBUF);
			return NULL;
		}
		vec_list = &autovec_list[vector-1];
		hard_vec = &autovects[vector-1];
		ih->ih_intrcnt = &intrcnt_auto[vector-1];
		break;
	case USER_VEC:
		if (vector < UVEC_MIN || vector > UVEC_MAX) {
			free(ih, M_DEVBUF);
			return NULL;
		}
		vec_list = &uservec_list[vector];
		hard_vec = &uservects[vector];
		ih->ih_intrcnt = &intrcnt_user[vector];
		break;
	default:
		printf("%s: bogus vector type\n", __func__);
		free(ih, M_DEVBUF);
		return NULL;
	}

	/*
	 * If the vec_list is empty, we insert ourselves at the head of the
	 * list and we re-route the 'hard-vector' to the appropriate handler.
	 */
	if (vec_list->lh_first == NULL) {
		s = splhigh();
		LIST_INSERT_HEAD(vec_list, ih, ih_link);
		if (type & FAST_VEC)
			*hard_vec = (u_long)ih->ih_fun;
		else if (*hard_vec != (u_long)intr_glue) {
			/*
			 * Normally, all settable vectors are already
			 * re-routed to the intr_glue() function.  The
			 * marvelous exception to these are the HBL/VBL
			 * interrupts.  They happen *very* often and
			 * can't be turned off on the Falcon.  So they
			 * are normally vectored to an 'rte' instruction.
			 */
			*hard_vec = (u_long)intr_glue;
		}
		splx(s);
		return ih;
	}

	/*
	 * Check for FAST_VEC botches
	 */
	cur_vec = vec_list->lh_first;
	if (cur_vec->ih_type & FAST_VEC) {
		free(ih, M_DEVBUF);
		printf("intr_establish: vector cannot be shared\n");
		return NULL;
	}

	/*
	 * We traverse the list and place ourselves after any handlers with
	 * our current (or higher) priority level.
	 */
	for (cur_vec = vec_list->lh_first; cur_vec->ih_link.le_next != NULL;
	    cur_vec = cur_vec->ih_link.le_next) {
		if (ih->ih_pri > cur_vec->ih_pri) {
			s = splhigh();
			LIST_INSERT_BEFORE(cur_vec, ih, ih_link);
			splx(s);
			return ih;
		}
	}

	/*
	 * We're the least important entry, it seems.  We just go
	 * on the end.
	 */
	s = splhigh();
	LIST_INSERT_AFTER(cur_vec, ih, ih_link);
	splx(s);
	return ih;
}
/*
 * Setup the game structure for use.
 * Here we create the initial game pieces for the game (5 'next' pieces, plus
 * the current piece and the 'hold' piece; 7 game pieces total).
 * We also allocate memory for the board colors, and set some initial
 * variables.
 */
int blocks_init(void)
{
	size_t i;

	log_info("Initializing game data");
	pgame = calloc(1, sizeof *pgame);
	if (!pgame) {
		log_err("Out of memory");
		exit(EXIT_FAILURE);
	}

	pthread_mutex_init(&pgame->lock, NULL);

	pgame->level = 1;
	pgame->nsec = 1E9 - 1;
	pgame->pause_ticks = 1000;

	LIST_INIT(&pgame->blocks_head);

	/* We need a head of the list to properly add new blocks, so manually
	 * add the head.  Then use a loop to add the rest of the starting
	 * blocks.
	 */
	for (i = 0; i < NEXT_BLOCKS_LEN + 2; i++) {
		struct blocks *last, *np = malloc(sizeof *np);
		if (!np) {
			log_err("Out of memory");
			exit(EXIT_FAILURE);
		}

		randomize_block(np);

		debug("Randomized new block: %zu", i);

		/* Manually add the head, then continue adding the others */
		if (i == 0) {
			LIST_INSERT_HEAD(&pgame->blocks_head, np, entries);
			continue;
		}

		/* Skip to end of list */
		for (last = HOLD_BLOCK(); last->entries.le_next;
		     last = last->entries.le_next)
			;

		LIST_INSERT_AFTER(last, np, entries);
	}

	/* Allocate memory for colors */
	for (i = 0; i < BLOCKS_MAX_ROWS; i++) {
		pgame->colors[i] = malloc(BLOCKS_MAX_COLUMNS *
		    sizeof(*pgame->colors[i]));
		if (!pgame->colors[i]) {
			log_err("Out of memory");
			exit(EXIT_FAILURE);
		}
	}

	return 1;
}
int config_parse_dnssd_txt(const char *unit,
                           const char *filename,
                           unsigned line,
                           const char *section,
                           unsigned section_line,
                           const char *lvalue,
                           int ltype,
                           const char *rvalue,
                           void *data,
                           void *userdata) {

        _cleanup_(dnssd_txtdata_freep) DnssdTxtData *txt_data = NULL;
        DnssdService *s = userdata;
        DnsTxtItem *last = NULL;

        assert(filename);
        assert(lvalue);
        assert(rvalue);
        assert(s);

        if (isempty(rvalue)) {
                /* Flush out collected items */
                s->txt_data_items = dnssd_txtdata_free_all(s->txt_data_items);
                return 0;
        }

        txt_data = new0(DnssdTxtData, 1);
        if (!txt_data)
                return log_oom();

        for (;;) {
                _cleanup_free_ char *word = NULL;
                _cleanup_free_ char *key = NULL;
                _cleanup_free_ char *value = NULL;
                _cleanup_free_ void *decoded = NULL;
                size_t length = 0;
                DnsTxtItem *i;
                int r;

                r = extract_first_word(&rvalue, &word, NULL,
                                       EXTRACT_QUOTES|EXTRACT_CUNESCAPE|EXTRACT_CUNESCAPE_RELAX);
                if (r == 0)
                        break;
                if (r == -ENOMEM)
                        return log_oom();
                if (r < 0)
                        return log_syntax(unit, LOG_ERR, filename, line, r,
                                          "Invalid syntax, ignoring: %s", rvalue);

                r = split_pair(word, "=", &key, &value);
                if (r == -ENOMEM)
                        return log_oom();
                if (r == -EINVAL)
                        key = TAKE_PTR(word);

                if (!ascii_is_valid(key)) {
                        log_syntax(unit, LOG_ERR, filename, line, 0,
                                   "Invalid syntax, ignoring: %s", key);
                        return -EINVAL;
                }

                switch (ltype) {

                case DNS_TXT_ITEM_DATA:
                        if (value) {
                                r = unbase64mem(value, strlen(value), &decoded, &length);
                                if (r == -ENOMEM)
                                        return log_oom();
                                if (r < 0)
                                        return log_syntax(unit, LOG_ERR, filename, line, r,
                                                          "Invalid base64 encoding, ignoring: %s", value);
                        }

                        r = dnssd_txt_item_new_from_data(key, decoded, length, &i);
                        if (r < 0)
                                return log_oom();
                        break;

                case DNS_TXT_ITEM_TEXT:
                        r = dnssd_txt_item_new_from_string(key, value, &i);
                        if (r < 0)
                                return log_oom();
                        break;

                default:
                        assert_not_reached("Unknown type of Txt config");
                }

                LIST_INSERT_AFTER(items, txt_data->txt, last, i);
                last = i;
        }

        if (!LIST_IS_EMPTY(txt_data->txt)) {
                LIST_PREPEND(items, s->txt_data_items, txt_data);
                txt_data = NULL;
        }

        return 0;
}
int
udp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;
	struct udphdr *uh;
	struct inpcb *in6p;
	struct mbuf *opts = NULL;
	int off = *offp;
	int plen, ulen;
	struct sockaddr_in6 udp_in6;
	struct socket *so;
	struct inpcbinfo *pcbinfo = &udbinfo[0];

	IP6_EXTHDR_CHECK(m, off, sizeof(struct udphdr), IPPROTO_DONE);

	ip6 = mtod(m, struct ip6_hdr *);

	if (faithprefix_p != NULL && (*faithprefix_p)(&ip6->ip6_dst)) {
		/* XXX send icmp6 host/port unreach? */
		m_freem(m);
		return IPPROTO_DONE;
	}

	udp_stat.udps_ipackets++;

	plen = ntohs(ip6->ip6_plen) - off + sizeof(*ip6);
	uh = (struct udphdr *)((caddr_t)ip6 + off);
	ulen = ntohs((u_short)uh->uh_ulen);

	if (plen != ulen) {
		udp_stat.udps_badlen++;
		goto bad;
	}

	/*
	 * Checksum extended UDP header and data.
	 */
	if (uh->uh_sum == 0)
		udp_stat.udps_nosum++;
	else if (in6_cksum(m, IPPROTO_UDP, off, ulen) != 0) {
		udp_stat.udps_badsum++;
		goto bad;
	}

	if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
		struct inpcb *last, *marker;

		/*
		 * Deliver a multicast datagram to all sockets
		 * for which the local and remote addresses and ports match
		 * those of the incoming datagram.  This allows more than
		 * one process to receive multicasts on the same port.
		 * (This really ought to be done for unicast datagrams as
		 * well, but that would cause problems with existing
		 * applications that open both address-specific sockets and
		 * a wildcard socket listening to the same port -- they would
		 * end up receiving duplicates of every unicast datagram.
		 * Those applications open the multiple sockets to overcome an
		 * inadequacy of the UDP socket interface, but for backwards
		 * compatibility we avoid the problem here rather than
		 * fixing the interface.  Maybe 4.5BSD will remedy this?)
		 */

		/*
		 * In a case that laddr should be set to the link-local
		 * address (this happens in RIPng), the multicast address
		 * specified in the received packet does not match with
		 * laddr.  To cure this situation, the matching is relaxed
		 * if the receiving interface is the same as one specified
		 * in the socket and if the destination multicast address
		 * matches one of the multicast groups specified in the
		 * socket.
		 */

		/*
		 * Construct sockaddr format source address.
		 */
		init_sin6(&udp_in6, m); /* general init */
		udp_in6.sin6_port = uh->uh_sport;

		/*
		 * KAME note: traditionally we dropped udpiphdr from mbuf
		 * here.  We need udphdr for IPsec processing so we do that
		 * later.
		 */

		/*
		 * Locate pcb(s) for datagram.
		 * (Algorithm copied from raw_intr().)
		 */
		last = NULL;
		marker = in_pcbmarker(mycpuid);

		GET_PCBINFO_TOKEN(pcbinfo);

		LIST_INSERT_HEAD(&pcbinfo->pcblisthead, marker, inp_list);
		while ((in6p = LIST_NEXT(marker, inp_list)) != NULL) {
			LIST_REMOVE(marker, inp_list);
			LIST_INSERT_AFTER(in6p, marker, inp_list);

			if (in6p->inp_flags & INP_PLACEMARKER)
				continue;
			if (!INP_ISIPV6(in6p))
				continue;
			if (in6p->in6p_lport != uh->uh_dport)
				continue;
			if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr)) {
				if (!IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr,
				    &ip6->ip6_dst) &&
				    !in6_mcmatch(in6p, &ip6->ip6_dst,
				    m->m_pkthdr.rcvif))
					continue;
			}
			if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr)) {
				if (!IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr,
				    &ip6->ip6_src) ||
				    in6p->in6p_fport != uh->uh_sport)
					continue;
			}

			if (last != NULL) {
				struct mbuf *n;

#ifdef IPSEC
				/*
				 * Check AH/ESP integrity.
				 */
				if (ipsec6_in_reject_so(m, last->inp_socket))
					ipsec6stat.in_polvio++;
					/* do not inject data into pcb */
				else
#endif /* IPSEC */
#ifdef FAST_IPSEC
				/*
				 * Check AH/ESP integrity.
				 */
				if (ipsec6_in_reject(m, last))
					;
				else
#endif /* FAST_IPSEC */
				if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
					/*
					 * KAME NOTE: do not
					 * m_copy(m, offset, ...) above.
					 * ssb_appendaddr() expects M_PKTHDR,
					 * and m_copy() will copy M_PKTHDR
					 * only if offset is 0.
					 */
					so = last->in6p_socket;
					if ((last->in6p_flags & IN6P_CONTROLOPTS) ||
					    (so->so_options & SO_TIMESTAMP)) {
						ip6_savecontrol(last, &opts,
						    ip6, n);
					}
					m_adj(n, off + sizeof(struct udphdr));
					lwkt_gettoken(&so->so_rcv.ssb_token);
					if (ssb_appendaddr(&so->so_rcv,
					    (struct sockaddr *)&udp_in6,
					    n, opts) == 0) {
						m_freem(n);
						if (opts)
							m_freem(opts);
						udp_stat.udps_fullsock++;
					} else {
						sorwakeup(so);
					}
					lwkt_reltoken(&so->so_rcv.ssb_token);
					opts = NULL;
				}
			}
			last = in6p;

			/*
			 * Don't look for additional matches if this one does
			 * not have either the SO_REUSEPORT or SO_REUSEADDR
			 * socket options set.  This heuristic avoids searching
			 * through all pcbs in the common case of a non-shared
			 * port.  It assumes that an application will never
			 * clear these options after setting them.
			 */
			if ((last->in6p_socket->so_options &
			    (SO_REUSEPORT | SO_REUSEADDR)) == 0)
				break;
		}
		LIST_REMOVE(marker, inp_list);

		REL_PCBINFO_TOKEN(pcbinfo);

		if (last == NULL) {
			/*
			 * No matching pcb found; discard datagram.
			 * (No need to send an ICMP Port Unreachable
			 * for a broadcast or multicast datgram.)
			 */
			udp_stat.udps_noport++;
			udp_stat.udps_noportmcast++;
			goto bad;
		}

#ifdef IPSEC
		/*
		 * Check AH/ESP integrity.
		 */
		if (ipsec6_in_reject_so(m, last->inp_socket)) {
			ipsec6stat.in_polvio++;
			goto bad;
		}
#endif /* IPSEC */
#ifdef FAST_IPSEC
		/*
		 * Check AH/ESP integrity.
		 */
		if (ipsec6_in_reject(m, last)) {
			goto bad;
		}
#endif /* FAST_IPSEC */
		if (last->in6p_flags & IN6P_CONTROLOPTS ||
		    last->in6p_socket->so_options & SO_TIMESTAMP)
			ip6_savecontrol(last, &opts, ip6, m);

		m_adj(m, off + sizeof(struct udphdr));
		so = last->in6p_socket;
		lwkt_gettoken(&so->so_rcv.ssb_token);
		if (ssb_appendaddr(&so->so_rcv, (struct sockaddr *)&udp_in6,
		    m, opts) == 0) {
			udp_stat.udps_fullsock++;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto bad;
		}
		sorwakeup(so);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		return IPPROTO_DONE;
	}

	/*
	 * Locate pcb for datagram.
	 */
	in6p = in6_pcblookup_hash(pcbinfo, &ip6->ip6_src, uh->uh_sport,
	    &ip6->ip6_dst, uh->uh_dport, 1, m->m_pkthdr.rcvif);
	if (in6p == NULL) {
		if (log_in_vain) {
			char buf[INET6_ADDRSTRLEN];

			strcpy(buf, ip6_sprintf(&ip6->ip6_dst));
			log(LOG_INFO,
			    "Connection attempt to UDP [%s]:%d from [%s]:%d\n",
			    buf, ntohs(uh->uh_dport),
			    ip6_sprintf(&ip6->ip6_src), ntohs(uh->uh_sport));
		}
		udp_stat.udps_noport++;
		if (m->m_flags & M_MCAST) {
			kprintf("UDP6: M_MCAST is set in a unicast packet.\n");
			udp_stat.udps_noportmcast++;
			goto bad;
		}
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOPORT, 0);
		return IPPROTO_DONE;
	}

#ifdef IPSEC
	/*
	 * Check AH/ESP integrity.
	 */
	if (ipsec6_in_reject_so(m, in6p->in6p_socket)) {
		ipsec6stat.in_polvio++;
		goto bad;
	}
#endif /* IPSEC */
#ifdef FAST_IPSEC
	/*
	 * Check AH/ESP integrity.
	 */
	if (ipsec6_in_reject(m, in6p)) {
		goto bad;
	}
#endif /* FAST_IPSEC */

	/*
	 * Construct sockaddr format source address.
	 * Stuff source address and datagram in user buffer.
	 */
	init_sin6(&udp_in6, m); /* general init */
	udp_in6.sin6_port = uh->uh_sport;
	if (in6p->in6p_flags & IN6P_CONTROLOPTS ||
	    in6p->in6p_socket->so_options & SO_TIMESTAMP)
		ip6_savecontrol(in6p, &opts, ip6, m);
	m_adj(m, off + sizeof(struct udphdr));

	so = in6p->in6p_socket;
	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (ssb_appendaddr(&so->so_rcv, (struct sockaddr *)&udp_in6,
	    m, opts) == 0) {
		udp_stat.udps_fullsock++;
		lwkt_reltoken(&so->so_rcv.ssb_token);
		goto bad;
	}
	sorwakeup(so);
	lwkt_reltoken(&so->so_rcv.ssb_token);
	return IPPROTO_DONE;
bad:
	if (m)
		m_freem(m);
	if (opts)
		m_freem(opts);
	return IPPROTO_DONE;
}
static void
sctp_process_tcb(struct xsctp_tcb *xstcb, char *buf, const size_t buflen,
    size_t *offset, int *indent)
{
	int i, xl_total = 0, xr_total = 0, x_max;
	struct xsctp_raddr *xraddr;
	struct xsctp_laddr *xladdr;
	struct xladdr_entry *prev_xl = NULL, *xl = NULL, *xl_tmp;
	struct xraddr_entry *prev_xr = NULL, *xr = NULL, *xr_tmp;

	LIST_INIT(&xladdr_head);
	LIST_INIT(&xraddr_head);

	/*
	 * Build the `struct xladdr_list' and `struct xraddr_list' lists
	 * so the addresses can be handled flexibly.
	 */
	while (*offset < buflen) {
		xladdr = (struct xsctp_laddr *)(buf + *offset);
		*offset += sizeof(struct xsctp_laddr);
		if (xladdr->last == 1)
			break;

		prev_xl = xl;
		xl = malloc(sizeof(struct xladdr_entry));
		if (xl == NULL) {
			warnx("malloc %lu bytes",
			    (u_long)sizeof(struct xladdr_entry));
			goto out;
		}
		xl->xladdr = xladdr;
		if (prev_xl == NULL)
			LIST_INSERT_HEAD(&xladdr_head, xl, xladdr_entries);
		else
			LIST_INSERT_AFTER(prev_xl, xl, xladdr_entries);
		xl_total++;
	}

	while (*offset < buflen) {
		xraddr = (struct xsctp_raddr *)(buf + *offset);
		*offset += sizeof(struct xsctp_raddr);
		if (xraddr->last == 1)
			break;

		prev_xr = xr;
		xr = malloc(sizeof(struct xraddr_entry));
		if (xr == NULL) {
			warnx("malloc %lu bytes",
			    (u_long)sizeof(struct xraddr_entry));
			goto out;
		}
		xr->xraddr = xraddr;
		if (prev_xr == NULL)
			LIST_INSERT_HEAD(&xraddr_head, xr, xraddr_entries);
		else
			LIST_INSERT_AFTER(prev_xr, xr, xraddr_entries);
		xr_total++;
	}

	/*
	 * Let's print the address infos.
	 */
	xl = LIST_FIRST(&xladdr_head);
	xr = LIST_FIRST(&xraddr_head);
	x_max = (xl_total > xr_total) ? xl_total : xr_total;
	for (i = 0; i < x_max; i++) {
		if (((*indent == 0) && i > 0) || *indent > 0)
			printf("%-12s ", " ");

		if (xl != NULL) {
			sctp_print_address(&(xl->xladdr->address),
			    htons(xstcb->local_port), numeric_port);
		} else {
			if (Wflag) {
				printf("%-45s ", " ");
			} else {
				printf("%-22s ", " ");
			}
		}

		if (xr != NULL && !Lflag) {
			sctp_print_address(&(xr->xraddr->address),
			    htons(xstcb->remote_port), numeric_port);
		}

		if (xl != NULL)
			xl = LIST_NEXT(xl, xladdr_entries);
		if (xr != NULL)
			xr = LIST_NEXT(xr, xraddr_entries);

		if (i == 0 && !Lflag)
			sctp_statesprint(xstcb->state);

		if (i < x_max)
			putchar('\n');
	}

out:
	/*
	 * Free the lists used to handle the addresses.
	 */
	xl = LIST_FIRST(&xladdr_head);
	while (xl != NULL) {
		xl_tmp = LIST_NEXT(xl, xladdr_entries);
		free(xl);
		xl = xl_tmp;
	}
	xr = LIST_FIRST(&xraddr_head);
	while (xr != NULL) {
		xr_tmp = LIST_NEXT(xr, xraddr_entries);
		free(xr);
		xr = xr_tmp;
	}
}
static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2,
    struct thread *td2, struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	int trypid;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct sigacts *newsigacts;

	sx_assert(&proctree_lock, SX_SLOCKED);
	sx_assert(&allproc_lock, SX_XLOCKED);

	p1 = td->td_proc;

	trypid = fork_findpid(fr->fr_flags);

	sx_sunlock(&proctree_lock);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG_PID(p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	allproc_gen++;
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	tidhash_add(td2);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	sx_xunlock(&allproc_lock);

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);

	PROC_UNLOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	PROC_UNLOCK(p2);

	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/*
	 * Copy filedesc.
	 */
	if (fr->fr_flags & RFCFDG) {
		fd = fdinit(p1->p_fd, false);
		fdtol = NULL;
	} else if (fr->fr_flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_XLOCK(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_XUNLOCK(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

#ifdef VIMAGE
	td2->td_vnet = NULL;
	td2->td_vnet_lpush = NULL;
#endif

	/*
	 * Allow the scheduler to initialize the child.
	 */
	thread_lock(td);
	sched_fork(td, td2);
	thread_unlock(td);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_NOTRACE | P2_NOTRACE_EXEC | P2_TRAPCAP);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	/*
	 * Whilst the proc lock is held, copy the VM domain data out
	 * using the VM domain method.
	 */
	vm_domain_policy_init(&p2->p_vm_dom_policy);
	vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
	    &p1->p_vm_dom_policy);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}

	if (fr->fr_flags & RFTSIGZMB)
		p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	lim_fork(p1, p2);

	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);

	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vrefact(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			kern_psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}

	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & TDP_ALTSTACK) | TDP_FORKING;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);

	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1)
		p2->p_reapsubtree = p2->p_pid;
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);

#ifdef KTRACE
	ktrprocfork(p1, p2);
#endif

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		VM_CNT_INC(v_forks);
		VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		VM_CNT_INC(v_vforks);
		VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		VM_CNT_INC(v_kthreads);
		VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		VM_CNT_INC(v_rforks);
		VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}

	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	PROC_LOCK(p2);
	PROC_LOCK(p1);
	microuptime(&p2->p_stats->p_start);
	PROC_SLOCK(p2);
	p2->p_state = PRS_NORMAL;
	PROC_SUNLOCK(p2);

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed.  We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);
#endif
	/*
	 * Hold the process so that it cannot exit after we make it runnable,
	 * but before we wait for the debugger.
	 */
	_PHOLD(p2);
	if (p1->p_ptevents & PTRACE_FORK) {
		/*
		 * Arrange for debugger to receive the fork event.
		 *
		 * We can report PL_FLAG_FORKED regardless of
		 * P_FOLLOWFORK settings, but it does not make a sense
		 * for runaway child.
		 */
		td->td_dbgflags |= TDB_FORK;
		td->td_dbg_forked = p2->p_pid;
		td2->td_dbgflags |= TDB_STOPATFORK;
	}
	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}
	PROC_UNLOCK(p2);

	/*
	 * Now can be swapped.
	 */
	_PRELE(p1);
	PROC_UNLOCK(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);

	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add to run queue.
		 */
		thread_lock(td2);
		TD_SET_CAN_RUN(td2);
		sched_add(td2, SRQ_BORING);
		thread_unlock(td2);
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
	} else {
		*fr->fr_procp = p2;
	}

	PROC_LOCK(p2);
	/*
	 * Wait until debugger is attached to child.
	 */
	while (td2->td_proc == p2 && (td2->td_dbgflags & TDB_STOPATFORK) != 0)
		cv_wait(&p2->p_dbgwait, &p2->p_mtx);
	_PRELE(p2);
	racct_proc_fork_done(p2);
	PROC_UNLOCK(p2);
}