int periodic_wake_create(spdid_t spdinv, unsigned int period)
{
    struct thread_event *te;
    unsigned short int tid = cos_get_thd_id();
    spdid_t spdid = cos_spd_id();
    event_time_t n, t;

    if (period < 1) return -1;

    TAKE(spdid);
    te = te_pget(tid);
    if (NULL == te) BUG();
    if (te->flags & TE_PERIODIC) {
        assert(!EMPTY_LIST(te, next, prev));
        REM_LIST(te, next, prev);
    }
    assert(EMPTY_LIST(te, next, prev));
    te->flags |= TE_PERIODIC;
    te->period = period;
    ticks = sched_timestamp();
    te->event_expiration = n = ticks + period;
    assert(n > ticks);

    t = next_event_time();
    assert(t > ticks);
    insert_pevent(te);
    if (t > n) sched_timeout(spdid, n - ticks);
    RELEASE(spdid);

    return 0;
}
/*
 * Return 1 if the inserted event is closer in the future than any
 * others, 0 otherwise.
 */
static int __insert_event(struct thread_event *te, struct thread_event *events)
{
    struct thread_event *tmp;

    assert(NULL != te);
    assert(te->event_expiration);
    assert(EMPTY_LIST(te, next, prev));
    assert(events->next && events->prev);

    if (EMPTY_LIST(events, next, prev)) {
        ADD_LIST(events, te, next, prev);
    } else for (tmp = FIRST_LIST(events, next, prev) ;
                ; /* condition built into body (see break;) */
                tmp = FIRST_LIST(tmp, next, prev)) {
        assert(tmp);
        struct thread_event *prev_te = LAST_LIST(tmp, next, prev);
        assert(prev_te);
        assert(tmp->prev && tmp->next);
        /* We found our place in the list OR the end of the list.
         * Either way, insert before this position. */
        if (tmp->event_expiration > te->event_expiration || events == tmp) {
            ADD_LIST(prev_te, te, next, prev);
            assert(prev_te->next == te && te->prev == prev_te);
            assert(te->next == tmp && tmp->prev == te);
            break;
        }
        assert(tmp->next && tmp->prev);
    }
    assert(!EMPTY_LIST(events, next, prev));
    assert(!EMPTY_LIST(te, next, prev));

    return 0;
}
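These two functions (and most of the Composite snippets below) rely on intrusive doubly-linked-list macros whose second and third arguments name the next/prev fields inside the structure, so one object can sit on several lists at once. The definitions below are a minimal sketch inferred from the usage in this section, not the actual Composite headers: ADD_LIST inserts immediately after the given node, and REM_LIST leaves the removed node self-linked so EMPTY_LIST holds for it afterwards.

/* Sketch of the list macros these snippets assume (hypothetical
 * reconstruction; the real definitions live in the Composite headers). */
#define INIT_LIST(o, n, p)  ((o)->n = (o)->p = (o))
#define EMPTY_LIST(o, n, p) ((o)->n == (o))
#define FIRST_LIST(o, n, p) ((o)->n)
#define LAST_LIST(o, n, p)  ((o)->p)
#define ADD_LIST(h, o, n, p) do {  \
    (o)->n    = (h)->n;            \
    (o)->p    = (h);               \
    (h)->n->p = (o);               \
    (h)->n    = (o);               \
} while (0)
#define REM_LIST(o, n, p) do {     \
    (o)->n->p = (o)->p;            \
    (o)->p->n = (o)->n;            \
    INIT_LIST(o, n, p);            \
} while (0)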
/*
 * FIXME: allow amnt to be specified in time units rather than ticks.
 */
int timed_event_block(spdid_t spdinv, unsigned int amnt)
{
    spdid_t spdid = cos_spd_id();
    struct thread_event *te;
    int block_time;
    event_time_t t;

    if (amnt == 0) return 0;
    /*
     * Convert from usec to ticks
     *
     * +2 here as we don't know how far through the current clock
     * tick we are _and_ we don't know how far into the clock tick
     * the wakeup time is.  The sleep is supposed to be for _at
     * least_ amnt clock ticks, thus here we are conservative.
     */
    //amnt = (amnt/(unsigned int)usec_per_tick) + 2;
    /* update: it seems like +1 should be enough */
    amnt++;

    TAKE(spdid);
    te = te_get(cos_get_thd_id());
    if (NULL == te) BUG();
    assert(EMPTY_LIST(te, next, prev));

    te->thread_id = cos_get_thd_id();
    te->flags &= ~TE_TIMED_OUT;
    te->flags |= TE_BLOCKED;

    ticks = sched_timestamp();
    te->event_expiration = ticks + amnt;
    block_time = ticks;
    assert(te->event_expiration > ticks);
    t = next_event_time();
    insert_event(te);
    assert(te->next && te->prev && !EMPTY_LIST(te, next, prev));
    RELEASE(spdid);

    if (t != next_event_time()) sched_timeout(spdid, amnt);
    if (-1 == sched_block(spdid, 0)) {
        prints("fprr: sched block failed in timed_event_block.");
    }

    /* we had better have been taken off the list! */
    assert(EMPTY_LIST(te, next, prev));
    if (te->flags & TE_TIMED_OUT) return TIMER_EXPIRED;

    /*
     * The event has already been removed from the event list in
     * event_expiration() by the timeout thread.
     *
     * Minus 1 here as we must report the amount of time we are
     * sure we waited for.  As we don't know how far into the tick
     * we were when we slept, and how far into a tick the wakeup
     * is, we must account for this.
     */
    return ((int)ticks - block_time - 1); //*usec_per_tick; /* expressed in ticks currently */
}
static inline event_time_t next_event_time(void)
{
    event_time_t e = TIMER_NO_EVENTS, p = TIMER_NO_EVENTS;

    e = EMPTY_LIST(&events, next, prev) ?
        TIMER_NO_EVENTS :
        FIRST_LIST(&events, next, prev)->event_expiration;
    p = EMPTY_LIST(&periodic, next, prev) ?
        TIMER_NO_EVENTS :
        FIRST_LIST(&periodic, next, prev)->event_expiration;

    /* assume here that TIMER_NO_EVENTS > all other values */
    return MIN(e, p);
}
static void *alloc_rb_buff(rb_meta_t *r)
{
    struct buff_page *p;
    int i;
    void *ret = NULL;

    lock_take(&r->l);
    if (EMPTY_LIST(&r->avail_pages, next, prev)) {
        if (NULL == (p = alloc_buff_page())) {
            lock_release(&r->l);
            return NULL;
        }
        ADD_LIST(&r->avail_pages, p, next, prev);
    }
    p = FIRST_LIST(&r->avail_pages, next, prev);
    assert(p->amnt_buffs < NP_NUM_BUFFS);
    for (i = 0 ; i < NP_NUM_BUFFS ; i++) {
        if (p->buff_used[i] == 0) {
            p->buff_used[i] = 1;
            ret = p->buffs[i];
            p->amnt_buffs++;
            break;
        }
    }
    assert(NULL != ret);
    if (p->amnt_buffs == NP_NUM_BUFFS) {
        REM_LIST(p, next, prev);
        ADD_LIST(&r->used_pages, p, next, prev);
    }
    lock_release(&r->l);

    return ret;
}
static void
dev_ifa_notify(struct proto *p, unsigned c, struct ifa *ad)
{
  struct rt_dev_config *P = (void *) p->cf;

  if (!EMPTY_LIST(P->iface_list) &&
      !iface_patt_find(&P->iface_list, ad->iface, ad->iface->addr))
    /* Empty list is automagically treated as "*" */
    return;

  if (ad->flags & IA_SECONDARY)
    return;

  if (ad->scope <= SCOPE_LINK)
    return;

  if (c & IF_CHANGE_DOWN)
  {
    net *n;

    DBG("dev_if_notify: %s:%I going down\n", ad->iface->name, ad->ip);
    n = net_find(p->table, ad->prefix, ad->pxlen);
    if (!n)
    {
      DBG("dev_if_notify: device shutdown: prefix not found\n");
      return;
    }

    /* Use iface ID as local source ID */
    struct rte_src *src = rt_get_source(p, ad->iface->index);
    rte_update2(p->main_ahook, n, NULL, src);
  }
  else if (c & IF_CHANGE_UP)
  {
    rta *a;
    net *n;
    rte *e;

    DBG("dev_if_notify: %s:%I going up\n", ad->iface->name, ad->ip);

    /* Use iface ID as local source ID */
    struct rte_src *src = rt_get_source(p, ad->iface->index);

    rta a0 = {
      .src = src,
      .source = RTS_DEVICE,
      .scope = SCOPE_UNIVERSE,
      .cast = RTC_UNICAST,
      .dest = RTD_DEVICE,
      .iface = ad->iface
    };

    a = rta_lookup(&a0);
    n = net_get(p->table, ad->prefix, ad->pxlen);
    e = rte_get_temp(a);
    e->net = n;
    e->pflags = 0;
    rte_update2(p->main_ahook, n, e, src);
  }
}
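Note the different signature here: the BIRD snippets (this one, plus ev2_schedule, config_parse, and ospf_lsack_send below) and the Knot DNS snippets use a one-argument EMPTY_LIST over a dedicated list head rather than naming next/prev fields. A simplified sketch of the shape this code assumes (the real declarations are in BIRD's lib/lists.h; the exact layout here is an assumption):

/* Simplified sketch of BIRD-style list primitives. */
typedef struct node { struct node *next, *prev; } node;
typedef struct list { node *head, *null, *tail; } list;  /* two overlaid sentinel nodes */

#define NODE (node *)
#define HEAD(l)       ((void *)((l).head))
#define EMPTY_LIST(l) (!(l).head->next)

void add_tail(list *l, node *n);  /* append n at the tail */
void rem_node(node *n);           /* unlink n from whatever list it is on */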
/* Return the top address of the page as it is mapped into the
 * component */
static vaddr_t
stkmgr_stk_add_to_spd(struct cos_stk_item *stk_item, struct spd_stk_info *info)
{
    vaddr_t d_addr, stk_addr, ret;
    spdid_t d_spdid;

    assert(info && stk_item);
    assert(EMPTY_LIST(stk_item, next, prev));
    d_spdid = info->spdid;

    // FIXME: Race condition
    ret = d_addr = (vaddr_t)valloc_alloc(cos_spd_id(), d_spdid, 1);
    /* d_addr = info->ci->cos_heap_ptr; */
    /* info->ci->cos_heap_ptr += PAGE_SIZE; */
    /* ret = info->ci->cos_heap_ptr; */

    // DOUT("Setting flags and assigning flags\n");
    stk_item->stk->flags = 0xDEADBEEF;
    stk_item->stk->next = (void *)0xDEADBEEF;
    stk_addr = (vaddr_t)(stk_item->hptr);
    if (d_addr != mman_alias_page(cos_spd_id(), stk_addr, d_spdid, d_addr)) {
        printc("<stkmgr>: Unable to map stack into component");
        BUG();
    }
    // DOUT("Mapped page\n");
    stk_item->d_addr = d_addr;
    stk_item->parent_spdid = d_spdid;

    // Add stack to the allocated stack array
    // DOUT("Adding to local spdid stk list\n");
    ADD_LIST(&info->stk_list, stk_item, next, prev);
    info->num_allocated++;
    assert(info->num_allocated == stkmgr_num_alloc_stks(info->spdid));

    return ret;
}
// all cbufs created for this component
void
mgr_map_client_mem(struct cos_cbuf_item *cci, struct spd_tmem_info *sti)
{
    char *l_addr, *d_addr;
    spdid_t d_spdid;
    // struct cb_desc *d;

    assert(sti && cci);
    assert(EMPTY_LIST(cci, next, prev));
    d_spdid = sti->spdid;

    /* TODO: multiple-page cbufs! */
    d_addr = valloc_alloc(cos_spd_id(), sti->spdid, 1);
    l_addr = cci->desc.addr;  // initialized in cos_init()
    assert(d_addr && l_addr);

    /* ...map it into the requesting component */
    if (unlikely(!mman_alias_page(cos_spd_id(), (vaddr_t)l_addr, d_spdid, (vaddr_t)d_addr)))
        goto err;
    /* DOUT("<<<MAPPED>>> mgr addr %p client addr %p\n ", l_addr, d_addr); */

    cci->desc.owner.addr = (vaddr_t)d_addr;
    cci->parent_spdid = d_spdid;
    assert(cci->desc.cbid == 0);

    // Add the cbuf to the shared vect here?  For now we do it in the
    // client, once l_addr and d_addr have been assigned.
done:
    return;
err:
    DOUT("Cbuf mgr: Cannot alias page to client!\n");
    mman_release_page(cos_spd_id(), (vaddr_t)l_addr, 0);
    /* valloc_free(cos_spd_id(), cos_spd_id(), l_addr, 1); */
    valloc_free(cos_spd_id(), d_spdid, (void *)d_addr, 1);
    goto done;
}
void spd_wake_threads(spdid_t spdid)
{
    struct spd_stk_info *ssi;

    ssi = get_spd_stk_info(spdid);
    printc("************ waking up %d threads for spd %d ************\n",
           ssi->num_blocked_thds, spdid);
    blklist_wake_threads(&ssi->bthd_list);
    assert(EMPTY_LIST(&ssi->bthd_list, next, prev));
    ssi->num_blocked_thds = 0;
}
static int fp_thread_params(struct sched_thd *t, char *p)
{
    int prio, tmp;
    char curr = p[0];
    struct sched_thd *c;

    assert(t);
    switch (curr) {
    case 'r':
        /* priority relative to current thread */
        c = sched_get_current();
        assert(c);
        tmp = atoi(&p[1]);
        prio = sched_get_metric(c)->priority + tmp;
        memcpy(sched_get_accounting(t), sched_get_accounting(c),
               sizeof(struct sched_accounting));
#ifdef DEFERRABLE
        if (sched_get_accounting(t)->T)
            ADD_LIST(&servers, t, sched_next, sched_prev);
#endif
        if (prio > PRIO_LOWEST) prio = PRIO_LOWEST;
        break;
    case 'a':
        /* absolute priority */
        prio = atoi(&p[1]);
        break;
    case 'i':
        /* idle thread */
        prio = PRIO_LOWEST;
        break;
    case 't':
        /* timer thread */
        prio = PRIO_HIGHEST;
        break;
#ifdef DEFERRABLE
    case 'd':
    {
        prio = ds_parse_params(t, p);
        if (EMPTY_LIST(t, sched_next, sched_prev) &&
            sched_get_accounting(t)->T) {
            ADD_LIST(&servers, t, sched_next, sched_prev);
        }
        fp_move_end_runnable(t);
        break;
    }
#endif
    default:
        printc("unknown priority option @ %s, setting to low\n", p);
        prio = PRIO_LOW;
    }

    if (sched_thd_ready(t)) fp_rem_thd(t);
    fp_add_thd(t, prio);

    return 0;
}
static inline void fp_rem_thd(struct sched_thd *t)
{
    u16_t p = sched_get_metric(t)->priority;

    /* if on a list _and_ no other thread is at this priority,
     * mark the priority level as empty */
    if (!EMPTY_LIST(t, prio_next, prio_prev) &&
        t->prio_next == t->prio_prev) {
        mask_unset(p);
    }
    REM_LIST(t, prio_next, prio_prev);
}
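fp_rem_thd clears a bit in a priority bitmask when the last thread at a priority leaves its runqueue, letting the scheduler locate the highest non-empty queue without scanning every level. A minimal sketch of such helpers, an assumption for illustration (not the actual Composite fprr code), valid for up to 32 priority levels:

/* Hypothetical priority-bitmask helpers: one bit per priority level.
 * Assumes <= 32 levels and that lower numbers mean higher priority. */
static u32_t prio_mask;

static inline void mask_set(u16_t p)   { prio_mask |=  (1U << p); }
static inline void mask_unset(u16_t p) { prio_mask &= ~(1U << p); }
/* index of the highest-priority (lowest-numbered) non-empty queue, or -1 */
static inline int  mask_high(void)     { return prio_mask ? __builtin_ctz(prio_mask) : -1; }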
static inline int freelist_add(struct cos_stk_item *csi)
{
    assert(EMPTY_LIST(csi, next, prev));
    assert(csi->parent_spdid == 0);

    stacks_allocated--;
    csi->free_next = free_stack_list;
    free_stack_list = csi;

    return 0;
}
void
ev2_schedule(struct event *e)
{
  struct birdloop *loop = birdloop_current();

  if (loop->poll_active && EMPTY_LIST(loop->event_list))
    wakeup_kick(loop);

  if (e->n.next)
    rem_node(&e->n);

  add_tail(&loop->event_list, &e->n);
}
static inline void evt_grp_free(struct evt_grp *g)
{
    int i;

    if (!EMPTY_LIST(g, next, prev)) {
        REM_LIST(g, next, prev);
    }
    while (!EMPTY_LIST(&g->events, next, prev)) {
        struct evt *e;

        e = FIRST_LIST(&g->events, next, prev);
        REM_LIST(e, next, prev);
    }
    for (i = 0 ; i < EVT_NUM_PRIOS ; i++) {
        while (!EMPTY_LIST(&g->triggered[i], next, prev)) {
            struct evt *e;

            e = FIRST_LIST(&g->triggered[i], next, prev);
            REM_LIST(e, next, prev);
        }
    }
    free(g);
}
static void mapping_del(struct mapping *m)
{
    assert(m);
    mapping_del_children(m);
    assert(!m->c);
    if (m->p && m->p->c == m) {
        if (EMPTY_LIST(m, _s, s_)) m->p->c = NULL;
        else                       m->p->c = FIRST_LIST(m, _s, s_);
    }
    m->p = NULL;
    REM_LIST(m, _s, s_);
    __mapping_destroy(m);
}
static inline int spd_freelist_add(spdid_t spdid, struct cos_stk_item *csi)
{
    struct spd_stk_info *ssi = get_spd_stk_info(spdid);

    /* Should either belong to this spd, or not to another (we
     * don't want it mapped into two components) */
    assert(csi->parent_spdid == spdid || EMPTY_LIST(csi, next, prev));
    assert(ssi->ci);

    /* FIXME: race */
    csi->stk->next = (struct cos_stk *)ssi->ci->cos_stacks.freelists[0].freelist;
    ssi->ci->cos_stacks.freelists[0].freelist = D_COS_STK_ADDR(csi->d_addr);

    return 0;
}
static int stkmgr_stack_find_home(struct cos_stk_item *csi, struct spd_stk_info *prev)
{
    struct spd_stk_info *dest;

    assert(EMPTY_LIST(csi, next, prev));

    dest = stkmgr_find_spd_requiring_stk();
    if (!dest) {
        freelist_add(csi);
    } else {
        assert(SPD_HAS_BLK_THD(dest));
        stkmgr_stk_add_to_spd(csi, dest);
        spd_freelist_add(dest->spdid, csi);
        spd_wake_threads(dest->spdid);
    }

    return 0;
}
int xfr_process_list(knot_pkt_t *pkt, xfr_put_cb process_item,
                     struct query_data *qdata)
{
    if (pkt == NULL || qdata == NULL || qdata->ext == NULL) {
        return KNOT_EINVAL;
    }

    int ret = KNOT_EOK;
    mm_ctx_t *mm = qdata->mm;
    struct xfr_proc *xfer = qdata->ext;

    zone_contents_t *zone = qdata->zone->contents;
    knot_rrset_t soa_rr = node_rrset(zone->apex, KNOT_RRTYPE_SOA);

    /* Prepend SOA on first packet. */
    if (xfer->npkts == 0) {
        ret = knot_pkt_put(pkt, 0, &soa_rr, KNOT_PF_NOTRUNC);
        if (ret != KNOT_EOK) {
            return ret;
        }
    }

    /* Process all items in the list. */
    while (!EMPTY_LIST(xfer->nodes)) {
        ptrnode_t *head = HEAD(xfer->nodes);
        ret = process_item(pkt, head->d, xfer);
        if (ret == KNOT_EOK) { /* Finished. */
            /* Complete change set. */
            rem_node((node_t *)head);
            mm_free(mm, head);
        } else { /* Packet full or other error. */
            break;
        }
    }

    /* Append SOA on last packet. */
    if (ret == KNOT_EOK) {
        ret = knot_pkt_put(pkt, 0, &soa_rr, KNOT_PF_NOTRUNC);
    }

    /* Update counters. */
    xfer->npkts  += 1;
    xfer->nbytes += pkt->size;

    return ret;
}
void test_list_find_middle( )
{
    list_type *list;
    list_type *ptr;

    printf("list_find (middle): ");
    list = create_testing();
    ptr = list_find(list, (void *) 'i');
    if (ptr == list->last->prev->prev) {
        printf("ok\n");
    } else {
        printf("failed!\nGot %p, expected %p!\n",
               (void *) ptr, (void *) list->last->prev->prev);
    }
    EMPTY_LIST(list);
}
void test_list_find_first( )
{
    list_type *list;
    list_type *ptr;

    printf("list_find (first): ");
    list = create_testing();
    ptr = list_find(list, (void *) 't');
    if (ptr == list) {
        printf("ok\n");
    } else {
        printf("failed!\nGot %p, expected %p!\n",
               (void *) ptr, (void *) list);
    }
    EMPTY_LIST(list);
}
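EMPTY_LIST in these two tests is yet another variant: it is invoked as a statement on the whole list after each test, apparently as cleanup rather than as an emptiness predicate. A hypothetical definition consistent with that usage (the next field and the use of free() are assumptions, not taken from the test suite's headers):

/* Hypothetical cleanup macro: walk the list and free every node.
 * (Shares a name with the predicate macros above, nothing more.) */
#define EMPTY_LIST(l) do {             \
    list_type *_cur = (l), *_next;     \
    while (_cur) {                     \
        _next = _cur->next;            \
        free(_cur);                    \
        _cur = _next;                  \
    }                                  \
} while (0)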
/*
 * As clients may be malicious or may not use the protocol correctly,
 * we cannot simply unmap the memory here.  We guarantee that a fault
 * can only happen within the malicious component; other components
 * either receive a NULL pointer from cbuf2buf or see wrong data.  No
 * fault happens in other components.  See details in
 * cbuf_unmap_prepare.
 */
static int cbuf_free_unmap(struct cbuf_comp_info *cci, struct cbuf_info *cbi)
{
    struct cbuf_maps *m = &cbi->owner, *next;
    struct cbuf_bin *bin;
    void *ptr = cbi->mem;
    unsigned long off, size = cbi->size;

    if (cbuf_unmap_prepare(cbi)) return 1;

    /* Unmap all of the pages from the clients */
    for (off = 0 ; off < size ; off += PAGE_SIZE) {
        mman_revoke_page(cos_spd_id(), (vaddr_t)ptr + off, 0);
    }

    /*
     * Deallocate the virtual address in the client, and clean up
     * the memory in this component
     */
    m = FIRST_LIST(&cbi->owner, next, prev);
    while (m != &cbi->owner) {
        next = FIRST_LIST(m, next, prev);
        REM_LIST(m, next, prev);
        valloc_free(cos_spd_id(), m->spdid, (void *)m->addr, size/PAGE_SIZE);
        free(m);
        m = next;
    }
    valloc_free(cos_spd_id(), m->spdid, (void *)m->addr, size/PAGE_SIZE);

    /* deallocate/unlink our data-structures */
    page_free(ptr, size/PAGE_SIZE);
    cmap_del(&cbufs, cbi->cbid);
    cci->allocated_size -= size;
    bin = cbuf_comp_info_bin_get(cci, size);
    if (EMPTY_LIST(cbi, next, prev)) {
        bin->c = NULL;
    } else {
        if (bin->c == cbi) bin->c = cbi->next;
        REM_LIST(cbi, next, prev);
    }
    free(cbi);

    return 0;
}
static void rd_remove(vaddr_t addr)
{
    struct rec_data_mm *head = NULL, *alias_rd = NULL;
    struct parent_rec_data_mm *parent_rd = NULL;

    assert(addr);

    parent_rd = parent_rdmm_lookup(addr);
    if (parent_rd && (head = parent_rd->head)) {
        // there are aliases from this addr
        while (!EMPTY_LIST(head, next, prev)) {
            alias_rd = FIRST_LIST(head, next, prev);
            assert(alias_rd);
            /* printc("cli: remove alias %p\n", alias_rd->d_addr); */
            REM_LIST(alias_rd, next, prev);
        }
    }

    return;
}
int periodic_wake_wait(spdid_t spdinv)
{
    spdid_t spdid = cos_spd_id();
    struct thread_event *te;
    u16_t tid = cos_get_thd_id();
    long long t;

    TAKE(spdid);
    te = te_pget(tid);
    if (NULL == te) BUG();
    if (!(te->flags & TE_PERIODIC)) goto err;
    assert(!EMPTY_LIST(te, next, prev));
    te->flags |= TE_BLOCKED;

    rdtscll(t);
    if (te->missed) {       /* we're late */
        long long diff;
        assert(te->completion);

        diff = (t - te->completion);
        te->lateness_tot += diff;
        //te->samples++;
        te->miss_lateness_tot += diff;
        //te->miss_samples++;

        te->completion = 0;
    } else {                /* on time! */
        te->completion = t;
    }
    RELEASE(spdid);

    if (-1 == sched_block(spdid, 0)) {
        prints("fprr: sched block failed in timed_event_periodic_wait.");
    }

    return 0;
err:
    RELEASE(spdid);
    return -1;
}
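periodic_wake_wait timestamps completions with rdtscll(), which reads the x86 time-stamp counter into a 64-bit variable. A common 32-bit x86 definition is shown below; whether this exact form is the one used by this codebase is an assumption:

/* Read the 64-bit time-stamp counter into val.  The "=A" constraint
 * pairs edx:eax, so this form works on 32-bit x86 only. */
#define rdtscll(val) __asm__ __volatile__("rdtsc" : "=A" (val))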
static void __mapping_destroy(struct mapping *m)
{
    struct comp_vas *cv;
    int idx;

    assert(m);
    assert(EMPTY_LIST(m, _s, s_));
    assert(m->p == NULL && m->c == NULL);
    cv = cvas_lookup(m->spdid);

    assert(cv && cv->pages);
    assert(m == cvect_lookup(cv->pages, m->addr >> PAGE_SHIFT));
    cvect_del(cv->pages, m->addr >> PAGE_SHIFT);
    cvas_deref(cv);
    idx = cos_mmap_cntl(COS_MMAP_REVOKE, 0, m->spdid, m->addr, 0);
    assert(idx == frame_index(m->f));
    frame_deref(m->f);
    cslab_free_mapping(m);
}
int periodic_wake_remove(spdid_t spdinv, unsigned short int tid)
{
    spdid_t spdid = cos_spd_id();
    struct thread_event *te;

    TAKE(spdid);
    te = te_pget(tid);
    if (NULL == te) BUG();
    if (!(te->flags & TE_PERIODIC)) goto err;
    assert(!EMPTY_LIST(te, next, prev));

    REM_LIST(te, next, prev);
    te->flags = 0;

    RELEASE(spdid);
    return 0;
err:
    RELEASE(spdid);
    return -1;
}
int __sg_sched_reflect(spdid_t spdid, int src_spd, int cnt)
{
    struct blocked_thd *blk_thd;
    int ret = 0;

    /* printc("scheduler server side stub (thd %d)\n", cos_get_thd_id()); */
    assert(src_spd);

    cos_sched_lock_take();
    /* printc("scheduler server side stub (thd %d)\n", cos_get_thd_id()); */
    /* printc("passed reflection: spd %d src_spd %d\n", spdid, src_spd); */

    if (!bthds[spdid].next) goto done;
    if (EMPTY_LIST(&bthds[spdid], next, prev)) goto done;

    for (blk_thd = FIRST_LIST(&bthds[spdid], next, prev);
         blk_thd != &bthds[spdid];
         blk_thd = FIRST_LIST(blk_thd, next, prev)) {
        printc("(cnt)blocked thds %d\n", blk_thd->id);
        cos_sched_lock_release();
        sched_wakeup(spdid, blk_thd->id);
        cos_sched_lock_take();
    }

    /* if (cnt == 1) { */
    /*     for (blk_thd = FIRST_LIST(&bthds[spd], next, prev); */
    /*          blk_thd != &bthds[spd]; */
    /*          blk_thd = FIRST_LIST(blk_thd, next, prev)){ */
    /*         printc("(cnt)blocked thds %d\n", blk_thd->id); */
    /*         ret++; */
    /*     } */
    /* } else { */
    /*     blk_thd = FIRST_LIST(&bthds[spd], next, prev); */
    /*     if (!EMPTY_LIST(blk_thd, next, prev)) REM_LIST(blk_thd, next, prev); */
    /*     ret = blk_thd->id; */
    /* } */
done:
    cos_sched_lock_release();
    return ret;
}
/**
 * config_parse - parse a configuration
 * @c: configuration
 *
 * config_parse() reads input by calling a hook function pointed to
 * by @cf_read_hook and parses it according to the configuration
 * grammar. It also calls all the preconfig and postconfig hooks
 * before, resp. after parsing.
 *
 * Result: 1 if the config has been parsed successfully, 0 if any
 * error has occurred (such as anybody calling cf_error()) and
 * the @err_msg field has been set to the error message.
 */
int
config_parse(struct config *c)
{
  DBG("Parsing configuration file `%s'\n", c->file_name);
  new_config = c;
  cfg_mem = c->mem;
  if (setjmp(conf_jmpbuf))
    return 0;
  cf_lex_init(0, c);
  sysdep_preconfig(c);
  protos_preconfig(c);
  rt_preconfig(c);
  cf_parse();
  protos_postconfig(c);
  if (EMPTY_LIST(c->protos))
    cf_error("No protocol is specified in the config file");
#ifdef IPV6
  if (!c->router_id)
    cf_error("Router ID must be configured manually on IPv6 routers");
#endif
  return 1;
}
static inline void block_ser_if_client_fault_notification(int spdid)
{
    struct track_block *tb;

    /* take the scheduler component lock */
    do { if (sched_component_take(cos_spd_id())) BUG(); } while (0);

    if (!tracking_block_list[spdid].next) goto done;
    if (EMPTY_LIST(&tracking_block_list[spdid], next, prev)) goto done;

    for (tb = FIRST_LIST(&tracking_block_list[spdid], next, prev);
         tb != &tracking_block_list[spdid];
         tb = FIRST_LIST(tb, next, prev)) {
        /* drop the lock while releasing the client's lock, then retake it */
        do { if (sched_component_release(cos_spd_id())) BUG(); } while (0);
        lock_component_release(spdid, tb->lock_id);
        do { if (sched_component_take(cos_spd_id())) BUG(); } while (0);
    }
done:
    /* release the scheduler component lock */
    do { if (sched_component_release(cos_spd_id())) BUG(); } while (0);
    return;
}
int event_update(conf_t *conf, zone_t *zone)
{
    assert(zone);

    /* Process update list - forward if zone has master, or execute. */
    updates_execute(conf, zone);

    /* Trim extra heap. */
    mem_trim();

    /* Replan event if next update waiting. */
    pthread_mutex_lock(&zone->ddns_lock);
    const bool empty = EMPTY_LIST(zone->ddns_queue);
    pthread_mutex_unlock(&zone->ddns_lock);
    if (!empty) {
        zone_events_schedule(zone, ZONE_EVENT_UPDATE, ZONE_EVENT_NOW);
    }

    return KNOT_EOK;
}
void
ospf_lsack_send(struct ospf_neighbor *n, int queue)
{
  struct ospf_packet *op;
  struct ospf_lsack_packet *pk;
  u16 len, i = 0;
  struct ospf_lsa_header *h;
  struct lsah_n *no;
  struct ospf_iface *ifa = n->ifa;
  struct proto *p = &n->ifa->oa->po->proto;

  if (EMPTY_LIST(n->ackl[queue]))
    return;

  pk = (struct ospf_lsack_packet *) ifa->sk->tbuf;
  op = (struct ospf_packet *) ifa->sk->tbuf;
  ospf_pkt_fill_hdr(n->ifa, pk, LSACK_P);
  h = pk->lsh;

  while (!EMPTY_LIST(n->ackl[queue]))
  {
    no = (struct lsah_n *) HEAD(n->ackl[queue]);
    memcpy(h + i, &no->lsa, sizeof(struct ospf_lsa_header));
    DBG("Iter %u ID: %R, RT: %R, Type: %04x\n", i, ntohl((h + i)->id),
        ntohl((h + i)->rt), (h + i)->type);
    i++;
    rem_node(NODE no);
    mb_free(no);
    if ((i * sizeof(struct ospf_lsa_header) +
         sizeof(struct ospf_lsack_packet)) > ospf_pkt_maxsize(n->ifa))
    {
      if (!EMPTY_LIST(n->ackl[queue]))
      {
        len = sizeof(struct ospf_lsack_packet) +
          i * sizeof(struct ospf_lsa_header);
        op->length = htons(len);
        DBG("Sending and continuing! Len=%u\n", len);

        OSPF_PACKET(ospf_dump_lsack, (struct ospf_lsack_packet *) ifa->sk->tbuf,
                    "LSACK packet sent via %s", ifa->iface->name);

        if (ifa->type == OSPF_IT_BCAST)
        {
          if ((ifa->state == OSPF_IS_DR) || (ifa->state == OSPF_IS_BACKUP))
            ospf_send_to(ifa, AllSPFRouters);
          else
            ospf_send_to(ifa, AllDRouters);
        }
        else
        {
          if ((ifa->state == OSPF_IS_DR) || (ifa->state == OSPF_IS_BACKUP))
            ospf_send_to_agt(ifa, NEIGHBOR_EXCHANGE);
          else
            ospf_send_to_bdr(ifa);
        }

        ospf_pkt_fill_hdr(n->ifa, pk, LSACK_P);
        i = 0;
      }
    }
  }

  len = sizeof(struct ospf_lsack_packet) + i * sizeof(struct ospf_lsa_header);
  op->length = htons(len);
  DBG("Sending! Len=%u\n", len);

  OSPF_PACKET(ospf_dump_lsack, (struct ospf_lsack_packet *) ifa->sk->tbuf,
              "LSACK packet sent via %s", ifa->iface->name);

  if (ifa->type == OSPF_IT_BCAST)
  {
    if ((ifa->state == OSPF_IS_DR) || (ifa->state == OSPF_IS_BACKUP))
      ospf_send_to(ifa, AllSPFRouters);
    else
      ospf_send_to(ifa, AllDRouters);
  }
  else
    ospf_send_to_agt(ifa, NEIGHBOR_EXCHANGE);
}