/*
 * Locate the nexthop with index 'index' in a FEC's nexthop list.
 * The list is kept sorted by index on insert, so a tail whose index is
 * smaller than the target proves the target is not in the list.
 *
 * Returns MPLS_SUCCESS with *n set, MPLS_END_OF_LIST when we have
 * walked past the end, or MPLS_FAILURE (bad index / not found).
 */
mpls_return_enum ldp_fec_find_nexthop_index(ldp_fec *f, int index,
  ldp_nexthop **n)
{
  ldp_nexthop *cursor;

  MPLS_ASSERT(f);
  *n = NULL;

  if (index <= 0) {
    return MPLS_FAILURE;
  }

  /* inserts are sorted by index: a tail smaller than the target
   * tells us we would "walk" past the end of the list */
  cursor = MPLS_LIST_TAIL(&f->nh_root);
  if (!cursor || cursor->index < index) {
    return MPLS_END_OF_LIST;
  }

  for (cursor = MPLS_LIST_HEAD(&f->nh_root); cursor;
       cursor = MPLS_LIST_NEXT(&f->nh_root, cursor, _fec)) {
    if (cursor->index == index) {
      *n = cursor;
      return MPLS_SUCCESS;
    }
  }
  return MPLS_FAILURE;
}
/*
 * Scan every upstream filter-set attached to FEC 'f' and return the
 * first attr whose LSP state equals 'state', or NULL when none match.
 */
ldp_attr *ldp_attr_find_upstream_state_any2(ldp_global * g, ldp_fec * f,
  ldp_lsp_state state)
{
  ldp_fs *fset;
  ldp_attr *candidate;

  for (fset = MPLS_LIST_HEAD(&f->fs_root_us); fset;
       fset = MPLS_LIST_NEXT(&f->fs_root_us, fset, _fec)) {
    for (candidate = MPLS_LIST_HEAD(&fset->attr_root); candidate;
         candidate = MPLS_LIST_NEXT(&fset->attr_root, candidate, _fs)) {
      if (candidate->state == state) {
        return candidate;
      }
    }
  }
  return NULL;
}
/*
 * Count the upstream attrs cross-connected to downstream attr 'ds'.
 * Note: 'ds' must be non-NULL; the list head is read unconditionally.
 */
int ldp_attr_num_us2ds(ldp_attr * ds)
{
  int total = 0;
  ldp_attr *us;

  for (us = MPLS_LIST_HEAD(&ds->us_attr_root); us;
       us = MPLS_LIST_NEXT(&ds->us_attr_root, us, _ds_attr)) {
    total++;
  }
  return total;
}
/*
 * Return the first nexthop of 'fec' that resolves (via
 * ldp_session_for_nexthop) to session 's', or NULL if none does.
 * Sessions are matched by index, not pointer identity.
 */
ldp_nexthop *ldp_nexthop_for_fec_session(ldp_fec *fec, ldp_session *s)
{
  ldp_nexthop *cursor;
  ldp_session *owner;

  for (cursor = MPLS_LIST_HEAD(&fec->nh_root); cursor;
       cursor = MPLS_LIST_NEXT(&fec->nh_root, cursor, _fec)) {
    owner = ldp_session_for_nexthop(cursor);
    if (owner && owner->index == s->index) {
      return cursor;
    }
  }
  return NULL;
}
/*
 * Resolve the LDP session used to reach FEC 'f' via nexthop 'nh'.
 *
 * Preference order:
 *   1. a session learned against the nexthop address;
 *   2. on a point-to-point interface that is owned by an entity, the
 *      session of the first adjacency on that entity.
 * Returns NULL when neither source yields a session.
 */
ldp_session *ldp_get_next_hop_session_for_fec2(ldp_fec * f, ldp_nexthop *nh)
{
  ldp_session *session = NULL;

  /*
   * find the info about the next hop for this FEC
   */
  if (nh->addr && nh->addr->session_root.count > 0) {
    session = mpls_link_list_head_data(&nh->addr->session_root);
  } else if (nh->iff && nh->iff->is_p2p == MPLS_BOOL_TRUE &&
      nh->iff->entity) {
    /* BUGFIX: was '&nh->iff->entity' — the address of the pointer
     * member, which is always non-NULL — so a NULL entity would be
     * dereferenced on the next line; test the pointer itself */
    ldp_adj *adj = MPLS_LIST_HEAD(&nh->iff->entity->adj_root);
    session = adj ? adj->session : NULL;
  }
  return session;
}
/*
 * Return the first attr in 'us_list' whose state equals 'state'.
 * A NULL list or no match yields NULL.
 */
static ldp_attr *_ldp_attr_find_upstream_state(ldp_attr_list *us_list,
  ldp_lsp_state state)
{
  ldp_attr *cursor;

  if (us_list == NULL) {
    return NULL;
  }
  for (cursor = MPLS_LIST_HEAD(us_list); cursor;
       cursor = MPLS_LIST_NEXT(us_list, cursor, _fs)) {
    if (cursor->state == state) {
      return cursor;
    }
  }
  return NULL;
}
/*
 * Return the first attr in 'ds_list' whose state equals 'state'.
 * A NULL list or no match yields NULL.
 */
static ldp_attr *_ldp_attr_find_downstream_state(ldp_attr_list *ds_list,
  ldp_lsp_state state)
{
  ldp_attr *cursor;

  if (ds_list == NULL) {
    return NULL;
  }
  for (cursor = MPLS_LIST_HEAD(ds_list); cursor;
       cursor = MPLS_LIST_NEXT(ds_list, cursor, _fs)) {
    if (cursor->state == state) {
      return cursor;
    }
  }
  return NULL;
}
/*
 * Find the nexthop attached to FEC 'f' whose info matches 'n'
 * (mpls_nexthop_compare() returning 0 means equal).
 * Returns NULL when no nexthop matches.
 */
ldp_nexthop *ldp_fec_nexthop_find(ldp_fec *f, mpls_nexthop *n)
{
  ldp_nexthop *cursor;

  MPLS_ASSERT(f && n);

  for (cursor = MPLS_LIST_HEAD(&f->nh_root); cursor;
       cursor = MPLS_LIST_NEXT(&f->nh_root, cursor, _fec)) {
    if (!mpls_nexthop_compare(&cursor->info, n)) {
      return cursor;
    }
  }
  return NULL;
}
mpls_return_enum ldp_fec_process_change(ldp_global * g, ldp_fec * f, ldp_nexthop *nh, ldp_nexthop *nh_old, ldp_session *nh_session_old) { ldp_session *peer = NULL; ldp_attr *us_attr = NULL; ldp_attr *ds_attr = NULL; ldp_session *nh_session = NULL; LDP_ENTER(g->user_data, "ldp_fec_process_change"); if (!nh_session_old) { nh_session_old = ldp_session_for_nexthop(nh_old); } /* * NH 1-5 decide if we need to release an existing mapping */ ds_attr = ldp_attr_find_downstream_state2(g, nh_session_old, f, LDP_LSP_STATE_MAP_RECV); if (!ds_attr) { /* NH.1 */ goto Detect_Change_Fec_Next_Hop_6; } if (ds_attr->ingress == MPLS_BOOL_TRUE) { #if MPLS_USE_LSR lsr_ftn ftn; ftn.outsegment_index = ds_attr->outlabel->info.handle; memcpy(&ftn.fec, &f->info, sizeof(mpls_fec)); lsr_cfg_ftn_set2(g->lsr_handle, &ftn, LSR_CFG_DEL); #else mpls_mpls_fec2out_del(g->mpls_handle, &f->info, &ds_attr->outlabel->info); #endif ds_attr->ingress = MPLS_BOOL_FALSE; ds_attr->outlabel->merge_count--; } if (g->label_retention_mode == LDP_RETENTION_LIBERAL) { /* NH.3 */ ldp_attr *us_temp; us_attr = MPLS_LIST_HEAD(&ds_attr->us_attr_root); while (us_attr) { /* need to walk the list in such a way as not to * "pull the rug out from under me self" */ us_temp = MPLS_LIST_NEXT(&ds_attr->us_attr_root, us_attr, _ds_attr); if (us_attr->state == LDP_LSP_STATE_MAP_SENT) { ldp_inlabel_del_outlabel(g, us_attr->inlabel); /* NH.2 */ ldp_attr_del_us2ds(us_attr, ds_attr); } us_attr = us_temp; } goto Detect_Change_Fec_Next_Hop_6; } ldp_label_release_send(g, nh_session_old, ds_attr, LDP_NOTIF_NONE); /* NH.4 */ ldp_attr_remove_complete(g, ds_attr, MPLS_BOOL_FALSE); /* NH.2,5 */ Detect_Change_Fec_Next_Hop_6: /* * NH 6-9 decides is we need to send a label request abort */ ds_attr = ldp_attr_find_downstream_state2(g, nh_session_old, f, LDP_LSP_STATE_REQ_SENT); if (ds_attr) { /* NH.6 */ if (g->label_retention_mode != LDP_RETENTION_CONSERVATIVE) { /* NH.7 */ /* NH.8,9 */ if (ldp_label_abort_send(g, nh_session_old, ds_attr) != 
MPLS_SUCCESS) { return MPLS_FAILURE; } } } /* * NH 10-12 decides if we can use a mapping from our database */ if (!(nh_session = ldp_get_next_hop_session_for_fec2(f,nh))){ goto Detect_Change_Fec_Next_Hop_16; } ds_attr = ldp_attr_find_downstream_state2(g, nh_session, f, LDP_LSP_STATE_MAP_RECV); if (!ds_attr) { /* NH.11 */ goto Detect_Change_Fec_Next_Hop_13; } if (ldp_label_mapping_process(g, nh_session, NULL, NULL, ds_attr, f) != MPLS_SUCCESS) { /* NH.12 */ return MPLS_FAILURE; } goto Detect_Change_Fec_Next_Hop_20; Detect_Change_Fec_Next_Hop_13: /* * NH 13-15 decides if we need to make a label request */ if (nh_session->oper_distribution_mode == LDP_DISTRIBUTION_ONDEMAND && g->label_retention_mode == LDP_RETENTION_CONSERVATIVE) { /* NH.14-15 */ if (ldp_label_request_for_xc(g, nh_session, &f->info, NULL, &ds_attr) != MPLS_SUCCESS) { return MPLS_FAILURE; } } goto Detect_Change_Fec_Next_Hop_20; Detect_Change_Fec_Next_Hop_16: peer = MPLS_LIST_HEAD(&g->session); while (peer) { if (peer->state == LDP_STATE_OPERATIONAL) { us_attr = ldp_attr_find_upstream_state2(g, peer, f, LDP_LSP_STATE_MAP_SENT); if (us_attr) { /* NH.17 */ if (ldp_label_withdraw_send(g, peer, us_attr, LDP_NOTIF_NONE) != MPLS_SUCCESS) { /* NH.18 */ ldp_attr_remove_complete(g, us_attr, MPLS_BOOL_FALSE); return MPLS_FAILURE; } } } peer = MPLS_LIST_NEXT(&g->session, peer, _global); } Detect_Change_Fec_Next_Hop_20: LDP_EXIT(g->user_data, "ldp_fec_process_change"); return MPLS_SUCCESS; }
/*
 * Handle the addition of a FEC/nexthop pair (RFC 3036 A.1 "FEC" steps):
 * resolve the nexthop session, complete the outlabel for any mapping
 * already received from it, then for every other operational peer decide
 * whether to send (or cross-connect) a label mapping, and finally either
 * process the received mapping (FEC.2) or issue a downstream-on-demand
 * request (FEC.3-4).
 *
 * 'nh_session' may be NULL; it is then derived from 'nh'.
 * Returns MPLS_FAILURE on allocation or send errors, MPLS_SUCCESS
 * otherwise.
 */
mpls_return_enum ldp_fec_process_add(ldp_global * g, ldp_fec * f,
  ldp_nexthop *nh, ldp_session *nh_session)
{
  ldp_session *peer = NULL;
  ldp_attr *ds_attr = NULL;
  ldp_attr *us_attr = NULL;
  /* NOTE(review): 'egress' is initialized FALSE and never set TRUE in
   * this function, so the egress branches below are dead code as
   * written — confirm whether an egress computation is missing */
  mpls_bool egress = MPLS_BOOL_FALSE;
  ldp_outlabel *out;

  LDP_ENTER(g->user_data, "ldp_fec_process_add");

  /*
   * find the info about the next hop for this FEC
   */
  if (!nh_session) {
    nh_session = ldp_session_for_nexthop(nh);
  }
  if (nh_session) {
    ds_attr = ldp_attr_find_downstream_state2(g, nh_session, f,
      LDP_LSP_STATE_MAP_RECV);
    /* a mapping was received before we had a usable nexthop: finish
     * building its outsegment now */
    if (ds_attr && !ds_attr->outlabel) {
      out = ldp_outlabel_create_complete(g, nh_session, ds_attr, nh);
      if (!out) {
        return MPLS_FAILURE;
      }
      ds_attr->outlabel = out;
    }
  }

  /*
   * for every peer except the nh hop peer, check to see if we need to
   * send a mapping
   */
  peer = MPLS_LIST_HEAD(&g->session);
  while (peer != NULL) { /* FEC.1 */
    if ((peer->state != LDP_STATE_OPERATIONAL) ||
        (nh_session && peer->index == nh_session->index)) {
      goto next_peer;
    }

    /* have I already sent a mapping for FEC to peer */
    if ((us_attr = ldp_attr_find_upstream_state2(g, peer, f,
        LDP_LSP_STATE_MAP_SENT))) {
      /* yep, don't send another, but cross-connect it to the new
       * downstream label if we have one */
      if (ds_attr) {
        if (ldp_inlabel_add_outlabel(g, us_attr->inlabel,
            ds_attr->outlabel) != MPLS_SUCCESS) {
          return MPLS_FAILURE;
        }
      }
      goto next_peer;
    }

    if (peer->oper_distribution_mode == LDP_DISTRIBUTION_UNSOLICITED) {
      if (g->lsp_control_mode == LDP_CONTROL_INDEPENDENT) {
        us_attr = ldp_attr_find_upstream_state2(g, peer, f,
          LDP_LSP_STATE_REQ_RECV);
        /* FEC.1.DUI3,4 */
        if (ldp_label_mapping_with_xc(g, peer, f, &us_attr, ds_attr) !=
          MPLS_SUCCESS) {
          /* NOTE(review): us_attr may still be NULL here if no REQ_RECV
           * attr existed and the call failed before creating one —
           * verify ldp_label_mapping_with_xc always populates it */
          if (!us_attr->in_tree) {
            ldp_attr_remove_complete(g, us_attr, MPLS_BOOL_FALSE);
          }
          goto next_peer;
        }
      } else {
        /*
         * LDP_CONTROL_ORDERED: only propagate when we hold a downstream
         * mapping or we are the egress for this FEC
         */
        if (ds_attr || egress == MPLS_BOOL_TRUE) { /* FEC.1.DUO2 */
          if (!(us_attr = ldp_attr_create(&f->info))) {
            return MPLS_FAILURE;
          }
          /* FEC.1.DUO3-4 */
          if ((egress == MPLS_BOOL_TRUE) && (mpls_policy_egress_check(
              g->user_data, &f->info, &nh->info) == MPLS_BOOL_TRUE)) {
            goto next_peer;
          }
          if (ldp_label_mapping_with_xc(g, peer, f, &us_attr, ds_attr) !=
            MPLS_SUCCESS) {
            return MPLS_FAILURE;
          }
        }
      }
    }
  next_peer:
    peer = MPLS_LIST_NEXT(&g->session, peer, _global);
  }

  if (ds_attr) { /* FEC.2 */
    /* run the received mapping through the full mapping procedure */
    if (ldp_label_mapping_process(g, nh_session, NULL, NULL, ds_attr, f) ==
      MPLS_FAILURE) { /* FEC.5 */
      return MPLS_FAILURE;
    }
    return MPLS_SUCCESS;
  }

  /*
   * LDP_DISTRIBUTION_ONDEMAND
   */
  /* FEC.3 */
  if (nh_session && nh_session->oper_distribution_mode ==
    LDP_DISTRIBUTION_ONDEMAND) {
    /* assume we're always "request when needed" */
    ds_attr = NULL;
    if (ldp_label_request_for_xc(g, nh_session, &f->info, NULL, &ds_attr) ==
      MPLS_FAILURE) { /* FEC.4 */
      return MPLS_FAILURE;
    }
  }

  LDP_EXIT(g->user_data, "ldp_fec_process_add");
  return MPLS_SUCCESS; /* FEC.6 */
}
/*
 * Process a received Label Request message (RFC 3036 A.1.x steps
 * LRp.1 / LRq.1-13): validate the received attributes, reject requests
 * with no route or that loop back to the requesting session, de-dup
 * against already-received requests, record the request upstream, and
 * then either answer with a mapping (independent mode, or ordered mode
 * with a downstream label / egress role) or propagate a label request
 * downstream (LRq.10).
 *
 * Returns MPLS_SUCCESS when the request was consumed (including the
 * polite-rejection paths), MPLS_FAILURE on internal errors.
 */
mpls_return_enum ldp_label_request_process(ldp_global * g, ldp_session * s,
  ldp_adj * a, ldp_entity * e, ldp_attr * us_attr, ldp_fec * f)
{
  ldp_session *nh_session = NULL;
  ldp_nexthop *nh = NULL;
  ldp_attr_list *us_list = NULL;
  mpls_bool egress = MPLS_BOOL_FALSE;
  ldp_attr *ds_attr = NULL;
  ldp_attr *us_temp = NULL;

  if (Check_Received_Attributes(g, s, us_attr, MPLS_LBLREQ_MSGTYPE) !=
    MPLS_SUCCESS) { /* LRp.1 */
    goto LRq_13;
  }

  /* no route to the requested FEC */
  if (f == NULL) {
    ldp_notif_send(g, s, us_attr, LDP_NOTIF_NO_ROUTE); /* LRq.5 */
    goto LRq_13;
  }

  /* just find one valid nexthop session for now */
  nh = MPLS_LIST_HEAD(&f->nh_root);
  while (nh) {
    nh_session = ldp_session_for_nexthop(nh);
    if (nh_session) {
      break;
    }
    nh = MPLS_LIST_NEXT(&f->nh_root, nh, _fec);
  }
  /* no downstream session at all means we act as egress for this FEC */
  if (!nh_session) {
    egress = MPLS_BOOL_TRUE;
  }

  /* the requester is itself our nexthop for the FEC: a loop */
  if (nh_session != NULL && s->index == nh_session->index) { /* LRq.3 */
    ldp_notif_send(g, s, us_attr, LDP_NOTIF_LOOP_DETECTED); /* LRq.4 */
    goto LRq_13;
  }

  /* drop duplicate requests (same msg_id already in REQ_RECV) */
  if ((us_list = ldp_attr_find_upstream_all2(g, s, f)) != NULL) {
    us_temp = MPLS_LIST_HEAD(us_list);
    while (us_temp != NULL) {
      if (us_temp->state == LDP_LSP_STATE_REQ_RECV && /* LRq.6 */
        us_temp->msg_id == us_attr->msg_id) { /* LRq.7 */
        goto LRq_13;
      }
      us_temp = MPLS_LIST_NEXT(us_list, us_temp, _fs);
    }
  }

  /* record the request */
  us_attr->state = LDP_LSP_STATE_REQ_RECV; /* LRq.8 */
  if (ldp_attr_insert_upstream2(g, s, us_attr, f) != MPLS_SUCCESS) {
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_ERROR,
      "Couldn't insert recv attributes in tree\n");
    goto ldp_label_request_process_error;
  }

  /* do we already hold a mapping from the downstream session? */
  if (nh_session) {
    ds_attr = ldp_attr_find_downstream_state2(g, nh_session, f,
      LDP_LSP_STATE_MAP_RECV);
  } else {
    ds_attr = NULL;
  }

  if (g->lsp_control_mode == LDP_CONTROL_INDEPENDENT) { /* LRq.9 */
    /* independent mode: answer immediately */
    if (ldp_label_mapping_with_xc(g, s, f, &us_attr, ds_attr) !=
      MPLS_SUCCESS) {
      goto ldp_label_request_process_error;
    }
    /* not egress and no downstream label yet: fall through to LRq_10
     * and request one downstream */
    if (egress == MPLS_BOOL_TRUE || ds_attr) {
      goto LRq_11;
    }
  } else {
    /* ordered mode: answer only with a downstream label or as egress,
     * and only when merging is allowed */
    if ((!(egress == MPLS_BOOL_TRUE || ds_attr)) ||
      (g->label_merge == MPLS_BOOL_FALSE)) {
      goto LRq_10;
    }
    if (ldp_label_mapping_with_xc(g, s, f, &us_attr, ds_attr) !=
      MPLS_SUCCESS) {
      goto ldp_label_request_process_error;
    }
    goto LRq_11;
  }

LRq_10:
  /* propagate the request toward the nexthop */
  ds_attr = NULL;
  if (ldp_label_request_for_xc(g, nh_session, &f->info, us_attr, &ds_attr) !=
    MPLS_SUCCESS) {
    goto ldp_label_request_process_error;
  }

LRq_11:
  /* the work done by LRq_11 is handled in ldp_label_mapping_with_xc() */

LRq_13:
  /* drop a downstream attr that never made it into the tree */
  if (ds_attr != NULL && ds_attr->in_tree == MPLS_BOOL_FALSE) {
    ldp_attr_remove_complete(g, ds_attr, MPLS_BOOL_FALSE);
  }
  return MPLS_SUCCESS;

ldp_label_request_process_error:
  return MPLS_FAILURE;
}
/*
 * One-shot timer callback fired when a session reaches OPERATIONAL in
 * downstream-on-demand mode: walk every FEC/nexthop pair in the global
 * table and, for pairs whose nexthop resolves to this session, issue the
 * initial label requests (one per received request / sent mapping found
 * upstream, or a single unattached request otherwise).
 *
 * Runs under the global lock; the timer is stopped on entry and either
 * deleted (walk completed) or re-armed.
 */
void ldp_label_request_initial_callback(mpls_timer_handle timer, void *extra,
  mpls_cfg_handle handle)
{
  ldp_session *s = (ldp_session *)extra;
  ldp_global *g = (ldp_global*)handle;
  ldp_nexthop *nh = NULL;
  ldp_fec *f = NULL;
  ldp_session *nh_session = NULL;
  mpls_bool done = MPLS_BOOL_FALSE;
  ldp_attr *attr = NULL;
  ldp_fs *fs = NULL;
  ldp_attr *ds_attr = NULL;

  LDP_ENTER(g->user_data, "ldp_label_request_initial_callback");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_TIMER,
    "Initial Label Request Callback fired: session(%d)\n", s->index);

  mpls_lock_get(g->global_lock);

  mpls_timer_stop(g->timer_handle, timer);

  if ((f = MPLS_LIST_HEAD(&g->fec))) {
    do {
      if ((nh = MPLS_LIST_HEAD(&f->nh_root))) {
        do {
          /* trace which FEC we are looking at */
          switch (f->info.type) {
            case MPLS_FEC_PREFIX:
              LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
                LDP_TRACE_FLAG_ROUTE, "Processing prefix FEC: %08x/%d ",
                f->info.u.prefix.network.u.ipv4, f->info.u.prefix.length);
              break;
            case MPLS_FEC_HOST:
              LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
                LDP_TRACE_FLAG_ROUTE, "Processing host FEC: %08x ",
                f->info.u.host.u.ipv4);
              break;
            case MPLS_FEC_L2CC:
              LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
                LDP_TRACE_FLAG_ROUTE, "Processing L2CC FEC: %d %d %d ",
                f->info.u.l2cc.connection_id, f->info.u.l2cc.group_id,
                f->info.u.l2cc.type);
              break;
            default:
              MPLS_ASSERT(0);
          }

          if (nh->info.type & MPLS_NH_IP) {
            LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
              LDP_TRACE_FLAG_ROUTE, "via %08x\n",
              nh->addr->address.u.ipv4);
          }
          if (nh->info.type & MPLS_NH_IF) {
            LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
              LDP_TRACE_FLAG_ROUTE, "via %p\n", nh->iff->handle);
          }

          /* check to see if export policy allows us to 'see' this route */
          if (mpls_policy_export_check(g->user_data, &f->info, &nh->info)
              == MPLS_BOOL_FALSE) {
            LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
              LDP_TRACE_FLAG_DEBUG, "Rejected by export policy\n");
            continue;
          }

          /* find the next hop session corresponding to this FEC */
          nh_session = ldp_session_for_nexthop(nh);

          /* do we have a valid next hop session, and is the nexp hop
           * session this session? */
          if ((!nh_session) || (nh_session->index != s->index)) {
            continue;
          }

          /* have we already sent a label request to this peer for this
           * FEC? */
          if (ldp_attr_find_downstream_state(g, s, &f->info,
              LDP_LSP_STATE_REQ_SENT)) {
            continue;
          }

          /* clear out info from the last FEC */
          ds_attr = NULL;

          /* jleu: duplicate code from ldp_attr_find_upstream_state_any:
           * send one request tied to the first REQ_RECV/MAP_SENT
           * upstream attr we find */
          fs = MPLS_LIST_HEAD(&f->fs_root_us);
          while (fs) {
            attr = MPLS_LIST_HEAD(&fs->attr_root);
            while (attr) {
              if (attr->state == LDP_LSP_STATE_REQ_RECV ||
                  attr->state == LDP_LSP_STATE_MAP_SENT) {
                if (!ds_attr) {
                  /* this is not neccessarily going to be XC'd to
                   * something */
                  ldp_label_request_for_xc(g, s, &f->info, attr, &ds_attr);
                }
              }
              attr = MPLS_LIST_NEXT(&fs->attr_root, attr, _fs);
            }
            fs = MPLS_LIST_NEXT(&f->fs_root_us, fs, _fec);
          }

          if (!ds_attr) {
            /*
             * we did not find any received requests or sent mappings so
             * send a request and xc it to nothing
             */
            ldp_label_request_for_xc(g, s, &f->info, NULL, &ds_attr);
          }
        } while ((nh = MPLS_LIST_NEXT(&f->nh_root, nh, _fec)));
      }
    } while ((f = MPLS_LIST_NEXT(&g->fec, f, _global)));
    done = MPLS_BOOL_TRUE;
  }

  if (done == MPLS_BOOL_TRUE) {
    mpls_timer_delete(g->timer_handle, timer);
    /* NOTE(review): 's' is written after MPLS_REFCNT_RELEASE may have
     * freed it — potential use-after-free; confirm the session is
     * always held elsewhere at this point, or clear the field first */
    MPLS_REFCNT_RELEASE(s, ldp_session_delete);
    s->initial_distribution_timer = (mpls_timer_handle) 0;
  } else {
    mpls_timer_start(g->timer_handle, timer, MPLS_TIMER_ONESHOT);
    /* need to mark the session with where it left off */
  }

  mpls_lock_release(g->global_lock);

  LDP_EXIT(g->user_data, "ldp_label_request_initial_callback");
}
/*
 * Tear down an attr and everything hanging off it, dispatching on the
 * attr's LSP state:
 *   MAP_RECV  - unbind inlabels/FTN entries from its outlabel (when we
 *               were ingress), detach all upstream cross-connects, and
 *               delete the downstream record;
 *   MAP_SENT  - unbind its inlabel from the outlabel and delete the
 *               upstream record;
 *   *_SENT    - detach from its downstream attr and delete upstream;
 *   *_RECV    - detach all upstream attrs and delete downstream.
 *
 * NOTE(review): the 'complete' parameter is unused in this body —
 * confirm whether callers rely on it elsewhere.
 */
void ldp_attr_remove_complete(ldp_global * g, ldp_attr * attr,
  mpls_bool complete)
{
  ldp_session *session = attr->session;
  ldp_outlabel *out = NULL;
  ldp_inlabel *in = NULL;
  ldp_attr *us_temp = NULL;
  mpls_fec fec;
  int i;

  switch (attr->state) {
    case LDP_LSP_STATE_MAP_RECV:
      if (attr->ingress == MPLS_BOOL_TRUE) {
        out = attr->outlabel;
        MPLS_ASSERT(out != NULL);
        /* detach every inlabel cross-connected to this outlabel */
        while ((in = MPLS_LIST_HEAD(&out->inlabel_root)) != NULL) {
          ldp_inlabel_del_outlabel(g, in);
        }
        /* remove the FTN (FEC-to-outsegment) binding for each FEC
         * element carried by this attr */
        if (out->merge_count > 0) {
          for (i = 0; i < attr->fecTlv.numberFecElements; i++) {
            fec_tlv2mpls_fec(&attr->fecTlv, i, &fec);
            out->merge_count--;
#if MPLS_USE_LSR
            {
              lsr_ftn ftn;
              memcpy(&ftn.fec, &fec, sizeof(mpls_fec));
              ftn.outsegment_index = out->info.handle;
              lsr_cfg_ftn_set2(g->lsr_handle, &ftn, LSR_CFG_DEL);
            }
#else
            mpls_mpls_fec2out_del(g->mpls_handle, &fec, &out->info);
#endif
          }
        }
        MPLS_ASSERT(out->merge_count == 0);
        ldp_attr_del_outlabel(g, attr);
        ldp_session_del_outlabel(g, session, out);
      }
      /* drop every upstream attr cross-connected to this downstream */
      while ((us_temp = MPLS_LIST_HEAD(&attr->us_attr_root)) != NULL) {
        ldp_attr_del_us2ds(g, us_temp, attr);
      }
      ldp_attr_delete_downstream(g, session, attr);
      break;
    case LDP_LSP_STATE_MAP_SENT:
      in = attr->inlabel;
      out = in->outlabel;
      /* only unbind when this is the last user of the inlabel */
      if (in->reuse_count == 1 && out) {
        ldp_inlabel_del_outlabel(g, in);
      }
      ldp_attr_del_inlabel(g, attr);
      ldp_attr_delete_upstream(g, session, attr);
      ldp_attr_del_us2ds(g, attr, attr->ds_attr);
      ldp_session_del_inlabel(g, session, in);
      break;
    case LDP_LSP_STATE_ABORT_SENT:
    case LDP_LSP_STATE_NOTIF_SENT:
    case LDP_LSP_STATE_REQ_RECV:
    case LDP_LSP_STATE_WITH_SENT:
    case LDP_LSP_STATE_NO_LABEL_RESOURCE_SENT:
    {
      /* states recorded on the upstream side */
      ldp_attr_del_us2ds(g, attr, attr->ds_attr);
      ldp_attr_delete_upstream(g, session, attr);
      break;
    }
    case LDP_LSP_STATE_ABORT_RECV:
    case LDP_LSP_STATE_NOTIF_RECV:
    case LDP_LSP_STATE_REQ_SENT:
    case LDP_LSP_STATE_WITH_RECV:
    case LDP_LSP_STATE_NO_LABEL_RESOURCE_RECV:
    {
      /* states recorded on the downstream side */
      while ((us_temp = MPLS_LIST_HEAD(&attr->us_attr_root)) != NULL) {
        ldp_attr_del_us2ds(g, us_temp, attr);
      }
      ldp_attr_delete_downstream(g, session, attr);
      break;
    }
  }
}
mpls_return_enum ldp_label_mapping_process(ldp_global * g, ldp_session * s, ldp_adj * a, ldp_entity * e, ldp_attr * r_attr, ldp_fec * f) { mpls_return_enum retval = MPLS_SUCCESS; ldp_session *peer = NULL; ldp_attr_list *us_list = NULL; ldp_attr_list *ds_list = NULL; ldp_attr *ds_attr = NULL; ldp_attr *ds_temp = NULL; ldp_attr *us_attr = NULL; ldp_attr *us_temp = NULL; ldp_attr dumb_attr; ldp_nexthop *nh = NULL; ldp_outlabel *out = NULL; mpls_bool requested = MPLS_BOOL_FALSE; ldp_attr *existing = NULL; mpls_bool need_request = MPLS_BOOL_FALSE; LDP_ENTER(g->user_data, "ldp_label_mapping_process"); LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL, "Label Mapping Recv from %s for %08x/%d\n", s->session_name, r_attr->fecTlv.fecElArray[0].addressEl.address, r_attr->fecTlv.fecElArray[0].addressEl.preLen); if ((ds_attr = ldp_attr_find_downstream_state2(g, s, f, LDP_LSP_STATE_REQ_SENT)) != NULL) { /* LMp.1 */ /* just remove the req from the tree, we will use the r_attr sent to us */ ldp_attr_delete_downstream(g, s, ds_attr); requested = MPLS_BOOL_TRUE; } else { requested = MPLS_BOOL_FALSE; } ds_attr = r_attr; ds_attr->state = LDP_LSP_STATE_MAP_RECV; /* LMp.2 */ /* * ds_attr is the mapping we will keep and is NOT in the tree, unless * it is an update mapping ... */ if (Check_Received_Attributes(g, s, ds_attr, MPLS_LBLMAP_MSGTYPE) == MPLS_SUCCESS) { /* LMp.3 */ goto LMp_9; } /* * A loop was detected */ if ((ds_list = ldp_attr_find_downstream_all2(g, s, f))) { ds_temp = MPLS_LIST_HEAD(ds_list); /* * check all the labels this session has received from "s" for "fec" * do we have a duplicat? 
*/ while (ds_temp) { if ((ds_temp->state == LDP_LSP_STATE_MAP_RECV) && /* LMp.4 */ ldp_attr_is_equal(ds_temp, ds_attr, LDP_ATTR_LABEL) == /* LMp.5 */ MPLS_BOOL_TRUE) { /* remove record of the label and remove it switching */ ldp_attr_remove_complete(g, ds_temp, MPLS_BOOL_TRUE); /* LMp.6,7 */ /* * I think this is supposed to be 32 NOT 33, we need to release * it don't we? */ goto LMp_33; } ds_temp = MPLS_LIST_NEXT(ds_list, ds_temp, _fs); } } LDP_PRINT(g->user_data, "Receive_Label_Map_8: send release"); if (ldp_label_release_send(g, s, ds_attr, LDP_NOTIF_LOOP_DETECTED) != MPLS_SUCCESS) { /* LMp.8 */ retval = MPLS_FAILURE; } goto LMp_33; LMp_9: /* * No Loop Detected */ ds_temp = ldp_attr_find_downstream_state2(g, s, f, LDP_LSP_STATE_MAP_RECV); if (requested == MPLS_BOOL_TRUE || g->label_merge == MPLS_BOOL_FALSE || !ds_temp) { /* !merging then this is always a new LSP * merging w/o a recv'd mapping is a new LSP * this check comes from Note 6 */ goto LMp_11; } /* searching all recv'd attrs for matched mappings, * stop after finding 1st match */ if ((ds_list = ldp_attr_find_downstream_all2(g, s, f))) { ds_temp = MPLS_LIST_HEAD(ds_list); while (ds_temp) { if (ds_temp->state == LDP_LSP_STATE_MAP_RECV) { /* LMp.9 */ if (ldp_attr_is_equal(ds_attr, ds_temp, LDP_ATTR_LABEL) == MPLS_BOOL_TRUE) { /* LMp.10 */ /* * this mapping matches an existing mapping, but it * could contain updated attributes */ existing = ds_temp; break; } else { /* * we have been given another label for the same FEC and we * didn't request it, release it */ LDP_PRINT(g->user_data, "LMp.10 dup without req\n"); goto LMp_32; } } ds_temp = MPLS_LIST_NEXT(ds_list, ds_temp, _fs); } } if (existing) { ldp_attr2ldp_attr(ds_attr, existing, LDP_ATTR_HOPCOUNT | LDP_ATTR_PATH | LDP_ATTR_MSGID | LDP_ATTR_LSPID | LDP_ATTR_TRAFFIC); ds_attr = existing; /* * no need to free ds_attr, since it was not added to the tree it * will be deleted when we exit ldp_label_mapping_process(), see * ldp_state_process(). 
*/ } /* * from this point on.... if this is an updated mapping then ds_attr * is the existing mapping which has now been update, else ds_attr * is the new mapping */ LMp_11: /* * existing ONLY has a value for updated label mapping */ nh = ldp_nexthop_for_fec_session(f,s); /* LMp.11 */ /* * the following departs from the procedure, it allows for filtering * of label mappings * * Are we configured to accept and INSTALL this mapping? */ if (mpls_policy_import_check(g->user_data, &f->info, &nh->info) == MPLS_BOOL_FALSE) { /* * policy has rejected it, store it away */ LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL, "Label Mapping for %08x/%d from %s filtered by import policy\n", r_attr->fecTlv.fecElArray[0].addressEl.address, r_attr->fecTlv.fecElArray[0].addressEl.preLen, s->session_name); if (existing) { ds_attr->filtered = MPLS_BOOL_TRUE; if (ds_attr->outlabel && ds_attr->outlabel->switching == MPLS_BOOL_TRUE) { /* the mapping has been filtered, but the original wasn't? 
*/ MPLS_ASSERT(0); } } else { ds_attr->filtered = MPLS_BOOL_TRUE; if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) { retval = MPLS_FAILURE; } } goto LMp_33; } if (!nh) { /* LMp.12 */ /* * if we did not find a nh hop for this FEC that corresponded to the * MsgSource then the MsgSource is not a nexthop for the FEC */ if (g->label_retention_mode == LDP_RETENTION_CONSERVATIVE) { /* LMp.13C */ LDP_PRINT(g->user_data, "LMp.13C conservative\n"); goto LMp_32; } /* * store it away */ LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL, "Session %s is not a valid nexthop for %08x/%d\n", s->session_name, r_attr->fecTlv.fecElArray[0].addressEl.address, r_attr->fecTlv.fecElArray[0].addressEl.preLen); if (!existing) { /* LMp.13L */ if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) { retval = MPLS_FAILURE; } } goto LMp_33; } /* * this is slightly different form the procedure, we can still be * transit for a FEC we are not configured to be ingress for. * Either way we only need to do the "install for fwd/switching" * only once. We could arrive here multiple times due to updates, * only install it the first time */ if ((!existing) || (!existing->outlabel)) { /* * we haven't installed it yet. * Either new (!existing), or a result of a "Detect FEC Nexthop Change" * and we had this mapping in our database (!existing->outlabel)) */ if (!(out = ldp_outlabel_create_complete(g, s, ds_attr, nh))) { LDP_PRINT(g->user_data, "LMp.15 failure creating outlabel\n"); goto LMp_32; } LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_BINDING, "Out Label Added\n"); } /* * are we configured to act as ingress for this FEC? 
*/ if (mpls_policy_ingress_check(g->user_data, &f->info, &nh->info) == MPLS_BOOL_TRUE) { /* LMp.14 */ /* * yep, bind the label to the FEC */ if (ds_attr->ingress != MPLS_BOOL_TRUE) { #if MPLS_USE_LSR lsr_ftn ftn; ftn.outsegment_index = ds_attr->outlabel->info.handle; memcpy(&ftn.fec, &f->info, sizeof(mpls_fec)); lsr_cfg_ftn_set2(g->lsr_handle, &ftn, LSR_CFG_ADD|LSR_FTN_CFG_FEC| LSR_FTN_CFG_OUTSEGMENT); #else mpls_mpls_fec2out_add(g->mpls_handle, &f->info, &ds_attr->outlabel->info); #endif ds_attr->ingress = MPLS_BOOL_TRUE; ds_attr->outlabel->merge_count++; LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_BINDING, "Acting as ingress for %08x/%d from %s\n", r_attr->fecTlv.fecElArray[0].addressEl.address, r_attr->fecTlv.fecElArray[0].addressEl.preLen, s->session_name); } } /* create a set of attrs that we will fill and compare against * if this mapping were to be propogate these are the attrs it would have * by comparing what we did sent in the past to these, we con figure out * if we need to send an updated mapping */ memset(&dumb_attr, 0, sizeof(ldp_attr)); mpls_fec2fec_tlv(&f->info, &dumb_attr.fecTlv, 0); dumb_attr.fecTlvExists = 1; dumb_attr.fecTlv.numberFecElements = 1; /* * by definition (we received a label mapping that will be used) this * LSR is _not_ the egress, so calculate a hop and path based on the * mapping we received. We will compare this with mapping that have * already been sent. If they differ, we will send an updated mapping */ Prepare_Label_Mapping_Attributes(g, s, &f->info, ds_attr, &dumb_attr, MPLS_BOOL_TRUE, MPLS_BOOL_TRUE, MPLS_BOOL_FALSE); if (!existing) { /* * this is the first time we've seen this mapping, add it to the database. 
* all future updates will modify this entry in place */ /* LMp.16 */ printf("!!!LMp16!!!\n"); if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) { retval = MPLS_FAILURE; goto LMp_33; } } peer = MPLS_LIST_HEAD(&g->session); while (peer) { /* LMp.17 */ if (peer->state != LDP_STATE_OPERATIONAL) { goto next_peer; } /* * it is just as easy to walk the list of all upstream attr for this * peer as it is to the individual check to see if we have sent a * label mapping for this FEC LSP */ // #error this whole section is f ed /* LMp.22 - 27 */ if ((us_list = ldp_attr_find_upstream_all2(g, peer, f))) { /* LMp.23 */ us_temp = MPLS_LIST_HEAD(us_list); while (us_temp) { /* * if we have sent a label mapping for the FEC and that label mapping * was an done in independent mode or it is part of an LSP created * due as part of an existing received label mapping */ /* LMp.18 */ if (us_temp->state == LDP_LSP_STATE_MAP_SENT) { LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_BINDING, "Already sent mapping for %08x/%d to %s\n", r_attr->fecTlv.fecElArray[0].addressEl.address, r_attr->fecTlv.fecElArray[0].addressEl.preLen, peer->session_name); if ((!existing) || (existing->index == us_temp->ds_attr->index)) { LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_BINDING, "Part of same LSP\n"); /* are the received attrs the same as the ones we've already sent */ if (ldp_attr_is_equal(us_temp, &dumb_attr, LDP_ATTR_HOPCOUNT | LDP_ATTR_PATH) != MPLS_BOOL_TRUE) { LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_BINDING, "Propogating updated attrs\n"); /* send an updated label mapping */ if (ldp_label_mapping_with_xc(g, us_temp->session, f, &us_temp, ds_attr) != MPLS_SUCCESS) { /* LMp.24-26 */ retval = MPLS_FAILURE; goto LMp_33; } } } } us_temp = MPLS_LIST_NEXT(us_list, us_temp, _fs); } } if ((peer->oper_distribution_mode == LDP_DISTRIBUTION_UNSOLICITED) && (g->lsp_control_mode == LDP_CONTROL_ORDERED)) { /* LMp.19 */ /* * if we're not 
merging and we have multiple ORDERED DU sessions, * we will to start requesting labels after we propogate the mapping to * the first peer */ if (need_request == MPLS_BOOL_TRUE) { if (ldp_attr_find_downstream_state2(g, peer, f, LDP_LSP_STATE_REQ_SENT) == NULL) { /* * we don't have a request for FEC to peer outstanding, make one */ ds_temp = NULL; if (ldp_label_request_for_xc(g, peer, &f->info, NULL, &ds_temp) != MPLS_SUCCESS) { retval = MPLS_FAILURE; goto LMp_33; } } } else { /* * We're in DU more, either we're merging, or we're not merging and * this is the first peer we're propogating this mapping to */ /* LMp.20-21,30 */ us_attr = NULL; if (ldp_label_mapping_with_xc(g, peer, f, &us_attr, ds_attr) != MPLS_SUCCESS) { retval = MPLS_FAILURE; goto LMp_33; } /* * if we're not merging, we will need to request a label for * the next DU peer */ if (g->label_merge == MPLS_BOOL_FALSE) { need_request = MPLS_BOOL_TRUE; } } } /* LMp.28 */ while ((us_temp = ldp_attr_find_upstream_state2(g, peer, f, LDP_LSP_STATE_REQ_RECV))) { if (peer->oper_distribution_mode == LDP_DISTRIBUTION_UNSOLICITED) { if (need_request == MPLS_BOOL_TRUE) { if (ldp_attr_find_downstream_state2(g, peer, f, LDP_LSP_STATE_REQ_SENT) == NULL) { /* * we don't have a request for FEC to peer outstanding */ ds_temp = NULL; if (ldp_label_request_for_xc(g, peer, &f->info, us_temp, &ds_temp) != MPLS_SUCCESS) { retval = MPLS_FAILURE; goto LMp_33; } } } else { if (ldp_label_mapping_with_xc(g, peer, f, &us_temp, ds_attr) != MPLS_SUCCESS) { retval = MPLS_FAILURE; goto LMp_33; } } } else { if ((us_list = ldp_attr_find_upstream_all2(g, peer, f))) { us_temp = MPLS_LIST_HEAD(ds_list); while (us_temp) { if (us_temp->state == LDP_LSP_STATE_REQ_RECV) { if (need_request == MPLS_BOOL_TRUE) { if (ldp_attr_find_downstream_state2(g, peer, f, LDP_LSP_STATE_REQ_SENT) == NULL) { /* * we don't have a request for FEC to peer outstanding */ ds_temp = NULL; if (ldp_label_request_for_xc(g, peer, &f->info, us_temp, &ds_temp) != MPLS_SUCCESS) 
{ retval = MPLS_FAILURE; goto LMp_33; } } } else { if (ldp_label_mapping_with_xc(g, peer, f, &us_temp, ds_attr) != MPLS_SUCCESS) { retval = MPLS_FAILURE; goto LMp_33; } /* * if we're not merging, we will need to request a label for * the next DU peer */ if (g->label_merge == MPLS_BOOL_FALSE) { need_request = MPLS_BOOL_TRUE; } } } us_temp = MPLS_LIST_NEXT(us_list, us_temp, _fs); } } } } next_peer: peer = MPLS_LIST_NEXT(&g->session, peer, _global); } LMp_33: LDP_EXIT(g->user_data, "ldp_label_mapping_process"); return retval; LMp_32: LDP_PRINT(g->user_data, "Receive_Label_Map_32: send release"); if (ldp_label_release_send(g, s, ds_attr, LDP_NOTIF_NONE) != MPLS_SUCCESS) { retval = MPLS_FAILURE; } LDP_EXIT(g->user_data, "ldp_label_mapping_process"); return retval; }
/*
 * Timer callback: walk every FEC/nexthop in the global tree and send the
 * initial round of label mappings to the newly established session 's'.
 *
 * timer  - the one-shot initial-distribution timer that fired
 * extra  - the ldp_session this distribution run is for
 * handle - the ldp_global config handle
 *
 * Runs under g->global_lock.  For each (fec, nexthop) pair it:
 *   - skips routes rejected by export policy,
 *   - skips FECs already mapped to this session (MAP_SENT upstream state),
 *   - skips nexthops that resolve back to this same session,
 *   - otherwise cross-connects an upstream mapping to a downstream label
 *     (if one is usable) or originates a mapping per the control mode.
 */
void ldp_label_mapping_initial_callback(mpls_timer_handle timer, void *extra,
  mpls_cfg_handle handle)
{
  ldp_session *s = (ldp_session *) extra;
  ldp_global *g = (ldp_global *) handle;
  ldp_attr *ds_attr = NULL;
  ldp_attr *us_attr = NULL;
  ldp_session *nh_session = NULL;
  mpls_bool done = MPLS_BOOL_FALSE;
  ldp_fec *f;
  ldp_nexthop *nh;

  LDP_ENTER(g->user_data, "ldp_label_mapping_initial_callback");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_TIMER,
    "Initial Label Mapping fired: session(%d)\n", s->index);

  mpls_lock_get(g->global_lock);

  mpls_timer_stop(g->timer_handle, timer);

  f = MPLS_LIST_HEAD(&g->fec);
  while (f) {
    nh = MPLS_LIST_HEAD(&f->nh_root);
    while (nh) {
      switch (f->info.type) {
        case MPLS_FEC_PREFIX:
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_ROUTE, "Processing prefix FEC: %08x/%d ",
            f->info.u.prefix.network.u.ipv4, f->info.u.prefix.length);
          break;
        case MPLS_FEC_HOST:
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_ROUTE, "Processing host FEC: %08x ",
            f->info.u.host.u.ipv4);
          break;
        case MPLS_FEC_L2CC:
          /* fixed typo in trace string ("Processingu") */
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_ROUTE, "Processing L2CC FEC: %d %d %d ",
            f->info.u.l2cc.connection_id, f->info.u.l2cc.group_id,
            f->info.u.l2cc.type);
          break;
        default:
          MPLS_ASSERT(0);
      }

      /* guard nh->addr like the MPLS_NH_IF branch guards nh->iff */
      if (nh->info.type & MPLS_NH_IP && nh->addr) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ROUTE, "via %08x\n", nh->addr->address.u.ipv4);
      }
      if (nh->info.type & MPLS_NH_IF && nh->iff) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ROUTE, "via %p\n", nh->iff->handle);
      }

      /* are we allowed to export this route from the rib */
      if (mpls_policy_export_check(g->user_data, &f->info, &nh->info) ==
        MPLS_BOOL_FALSE) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_POLICY, "Rejected by export policy\n");
        goto ldp_label_mapping_initial_callback_end_nh;
      }

      /* have we already sent a mapping for this fec to the new session? */
      if ((us_attr = ldp_attr_find_upstream_state2(g, s, f,
        LDP_LSP_STATE_MAP_SENT))) {
        /* no need to send another mapping */
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ROUTE, "Already sent this FEC to session %d\n",
          s->index);
        goto ldp_label_mapping_initial_callback_end_nh;
      }

      if (!(nh_session = ldp_get_next_hop_session_for_fec2(f, nh))) {
        /* no downstream session for this nexthop: no label to stitch */
        ds_attr = NULL;
      } else {
        if (nh_session->index == s->index) {
          /* don't reflect a mapping back at the session it came from */
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_ROUTE, "Nexthop session(%d) == session(%d)\n",
            nh_session->index, s->index);
          goto ldp_label_mapping_initial_callback_end_nh;
        }
        ds_attr = ldp_attr_find_downstream_state2(g, nh_session, f,
          LDP_LSP_STATE_MAP_RECV);
      }

      /*
       * BUGFIX: guard ds_attr before ldp_attr_num_us2ds(); the original
       * called it with a possibly-NULL ds_attr, and ldp_attr_num_us2ds()
       * dereferences its argument unconditionally.
       */
      if (ds_attr && (g->label_merge != MPLS_BOOL_TRUE) &&
        ldp_attr_num_us2ds(ds_attr)) {
        /* we have a ds label, but can't use it (no label merging) */
        ds_attr = NULL;
      }

      us_attr = NULL;
      if (ds_attr) {
        /* we can use it, merge on baby */
        ldp_label_mapping_with_xc(g, s, f, &us_attr, ds_attr);
      } else {
        /* we don't have a ds label */
        /* we will be egress? */
        if (g->lsp_control_mode == LDP_CONTROL_ORDERED) {
          if (mpls_policy_egress_check(g->user_data, &f->info,
            &nh->info) == MPLS_BOOL_TRUE) {
            ldp_label_mapping_with_xc(g, s, f, &us_attr, NULL);
          }
        } else {
          ldp_label_mapping_with_xc(g, s, f, &us_attr, NULL);
        }
      }

ldp_label_mapping_initial_callback_end_nh:
      nh = MPLS_LIST_NEXT(&f->nh_root, nh, _fec);
    }
    f = MPLS_LIST_NEXT(&g->fec, f, _global);
  }
  done = MPLS_BOOL_TRUE;

  if (done == MPLS_BOOL_TRUE) {
    mpls_timer_delete(g->timer_handle, timer);
    /*
     * BUGFIX: clear the session's timer handle BEFORE dropping the
     * refcount; MPLS_REFCNT_RELEASE may delete 's', and the original
     * wrote to s->initial_distribution_timer after the release
     * (potential use-after-free).
     */
    s->initial_distribution_timer = (mpls_timer_handle) 0;
    MPLS_REFCNT_RELEASE(s, ldp_session_delete);
  } else {
    mpls_timer_start(g->timer_handle, timer, MPLS_TIMER_ONESHOT);
    /* need to mark the session with where it left off */
  }

  mpls_lock_release(g->global_lock);

  LDP_EXIT(g->user_data, "ldp_label_mapping_initial_callback");
}
mpls_return_enum ldp_label_withdraw_process(ldp_global * g, ldp_session * s, ldp_adj * a, ldp_entity * e, ldp_attr * r_attr, ldp_fec * f) { mpls_bool label_exists = MPLS_BOOL_FALSE; ldp_attr_list *ds_list = NULL; ldp_attr *ds_attr = NULL; ldp_attr *ds_temp = NULL; ldp_attr *us_temp = NULL; ldp_nexthop *nh = NULL; mpls_return_enum retval = MPLS_SUCCESS; LDP_ENTER(g->user_data, "ldp_label_withdraw_process"); LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL, "Withdraw Recv for %s\n", s->session_name); if (r_attr->genLblTlvExists || r_attr->atmLblTlvExists || r_attr->frLblTlvExists) { label_exists = MPLS_BOOL_TRUE; } else { MPLS_ASSERT(0); } if (f) { if ((ds_list = ldp_attr_find_downstream_all2(g, s, f)) != NULL) { ds_temp = MPLS_LIST_HEAD(ds_list); while (ds_temp) { if (ds_temp->state == LDP_LSP_STATE_MAP_RECV) { /* LWd.3 */ if (ldp_attr_is_equal(r_attr, ds_temp, LDP_ATTR_LABEL)) { ds_attr = ds_temp; break; } } ds_temp = MPLS_LIST_NEXT(ds_list, ds_temp, _fs); } } if (!ds_attr) { retval = MPLS_FAILURE; LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL, "Withdraw Recv for a non-existant mapping from %s\n",s->session_name); goto LWd_13; } /* * we want to remove it from the tree, but not delete it yet * so hold a refcnt, we will release that refcnt at the end, thus * deleting it if no one else it holding a refcnt */ MPLS_REFCNT_HOLD(ds_attr); ldp_attr_remove_complete(g, ds_attr, MPLS_BOOL_FALSE); /* LWd.4 */ /* LWd.2 */ if (ldp_label_release_send(g, s, ds_attr, LDP_NOTIF_NONE) != MPLS_SUCCESS) { retval = MPLS_FATAL; goto LWd_13; } if (g->lsp_control_mode == LDP_CONTROL_ORDERED) { /* LWd.5 */ goto LWd_8; } if (s->oper_distribution_mode != LDP_DISTRIBUTION_ONDEMAND) { /* LWd.6 */ goto LWd_13; } MPLS_ASSERT((nh = ldp_nexthop_for_fec_session(f, s))); retval = ldp_fec_process_add(g, f, nh, s); /* LWd.7 */ goto LWd_13; LWd_8: /* I can only propogate a label withdraw to the upstreams attached to the downstream found above */ us_temp = 
MPLS_LIST_HEAD(&ds_attr->us_attr_root); while (us_temp) { if (us_temp->state == LDP_LSP_STATE_MAP_SENT) { if (ldp_label_withdraw_send(g, us_temp->session, us_temp, LDP_NOTIF_NONE) != MPLS_SUCCESS) { /* LWd.11 */ retval = MPLS_FATAL; goto LWd_13; } } us_temp = MPLS_LIST_NEXT(&ds_attr->us_attr_root, us_temp, _ds_attr); } } else { /* JLEU: process wildcard FEC stuff here */ MPLS_ASSERT(0); } LWd_13: if (ds_attr) { MPLS_REFCNT_RELEASE2(g, ds_attr, ldp_attr_delete); } LDP_EXIT(g->user_data, "ldp_label_withdraw_process"); return retval; }