mpls_return_enum ldp_state_machine(ldp_global * g, ldp_session * session, ldp_adj * adj, ldp_entity * entity, uint32_t event, ldp_mesg * msg, mpls_dest * from) { int state = LDP_STATE_NONE; int func = 0; extern int PW_SIGNALING_FLAG;//testing mpls_return_enum retval = MPLS_FAILURE; LDP_ENTER(g->user_data, "ldp_state_machine"); if (session) { state = session->state; } else if (adj) { state = LDP_STATE_NON_EXIST; } if (state >= LDP_STATE_NONE && state <= LDP_STATE_OPERATIONAL) { if (event <= LDP_EVENT_KTIMER) { LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_STATE, "FSM: state %d, event %d\n", state, event); //printf("State: %d\n",state);//testing func = ldp_state_table[state][event]; //printf("Fun: %d\n",func);//testing retval = ldp_state_func[func] (g, session, adj, entity, event, msg, from); } } LDP_EXIT(g->user_data, "ldp_state_machine"); return retval; }
/*
 * Allocate and initialize an LDP hello adjacency from values learned in a
 * received hello (source address, peer LSR-ID, hello hold time, optional
 * transport address, configuration sequence number).
 *
 * Returns the new adjacency, or NULL on bad arguments or allocation failure.
 * Caller owns the returned object (refcount initialized to 0).
 *
 * Fix: the original called mpls_malloc() BEFORE validating source/lsraddr,
 * leaking the allocation when either was NULL. Validate first, then allocate.
 *
 * NOTE(review): the LDP_TRACE_LOG calls reference `g`, which is not a
 * parameter of this function — presumably a file-scope global; confirm it
 * is in scope in the full file.
 */
ldp_adj *ldp_adj_create(mpls_inet_addr * source, mpls_inet_addr * lsraddr,
  int labelspace, int remote_hellotime,
  mpls_inet_addr * remote_transport_address, uint32_t remote_csn)
{
  ldp_adj *a;
  struct in_addr addr;

  /* validate before allocating so a bad call cannot leak memory */
  if (lsraddr == NULL || source == NULL)
    return NULL;

  a = (ldp_adj *) mpls_malloc(sizeof(ldp_adj));
  if (a) {
    memset(a, 0, sizeof(ldp_adj));
    MPLS_REFCNT_INIT(a, 0);
    MPLS_LIST_ELEM_INIT(a, _global);
    MPLS_LIST_ELEM_INIT(a, _session);
    MPLS_LIST_ELEM_INIT(a, _entity);
    a->index = _ldp_adj_get_next_index();

    /* these are operational values */
    /* JLEU: where do I grab these values from */

    /* these values are learned form the remote peer */
    memcpy(&a->remote_source_address, source, sizeof(mpls_inet_addr));
    memcpy(&a->remote_lsr_address, lsraddr, sizeof(mpls_inet_addr));

    addr.s_addr = htonl(lsraddr->u.ipv4);
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_PERIODIC,
      "Adj(%d) created for 0x%08x/", a->index, addr);
    addr.s_addr = htonl(source->u.ipv4);
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_PERIODIC,
      "0x%08x\n", addr);

    /* transport address is optional in the hello */
    if (remote_transport_address) {
      memcpy(&a->remote_transport_address, remote_transport_address,
        sizeof(mpls_inet_addr));
    } else {
      memset(&a->remote_transport_address, 0, sizeof(mpls_inet_addr));
    }
    a->remote_hellotime = remote_hellotime;
    a->remote_csn = remote_csn;
    a->state = MPLS_OPER_DOWN;
    a->role = LDP_NONE;
  }
  return a;
}
/*
 * Send a Label Release message on session "s" for the binding described
 * by "a", carrying the given notification status code.
 *
 * Thin wrapper: traces the send, then delegates to the shared
 * release/withdraw transmit path with the Release message type.
 */
mpls_return_enum ldp_label_release_send(ldp_global * g, ldp_session * s,
  ldp_attr * a, ldp_notif_status status)
{
  mpls_return_enum result;

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_LABEL,
    "Release Sent: session(%d)\n", s->index);

  result = ldp_label_rel_with_send(g, s, a, status, MPLS_LBLREL_MSGTYPE);
  return result;
}
/*
 * Bind adjacency "a" to session "s": take a reference on the session,
 * record it on the adjacency, and add the adjacency to the session's
 * adjacency list.
 *
 * Fix: corrected the "sesssion" typo in the trace message.
 *
 * NOTE(review): `g` is not declared in this scope — presumably a
 * file-scope global used by the trace macro; confirm in the full file.
 */
void ldp_adj_add_session(ldp_adj * a, ldp_session * s)
{
  MPLS_ASSERT(a && s);
  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_PERIODIC,
    "Adj(%d) bound to session(%d)\n", a->index, s->index);
  MPLS_REFCNT_HOLD(s);
  a->session = s;
  _ldp_session_add_adj(s, a);
}
/*
 * Send a Label Withdraw for the upstream binding "us_attr" on session "s".
 *
 * Marks the attribute as WITHDRAW-SENT first, then transmits through the
 * shared release/withdraw path. The trace is emitted only after a
 * successful send.
 *
 * Returns MPLS_SUCCESS on send success, MPLS_FAILURE when the transmit
 * path reports MPLS_FAILURE.
 */
mpls_return_enum ldp_label_withdraw_send(ldp_global * g, ldp_session * s,
  ldp_attr * us_attr, ldp_notif_status status)
{
  mpls_return_enum rc;

  us_attr->state = LDP_LSP_STATE_WITH_SENT;

  rc = ldp_label_rel_with_send(g, s, us_attr, status, MPLS_LBLWITH_MSGTYPE);
  if (rc == MPLS_FAILURE) {
    return MPLS_FAILURE;
  }

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_LABEL,
    "Withdraw Sent: session(%d)\n", s->index);
  return MPLS_SUCCESS;
}
/*
 * Send a Label Request downstream on session "s" (RFC 3036 A.1.1,
 * procedure "Send_Label_Request", steps SLRq.1-7).
 *
 * us_attr: optional upstream attr to cross-link to the request.
 * ds_attr: in/out; on entry the candidate downstream attr; on return may
 *          point at an equivalent already-pending request instead (the
 *          caller must keep using *ds_attr, not a stale copy).
 *
 * Returns MPLS_SUCCESS if a request is pending or was sent,
 * MPLS_FAILURE otherwise (no-label-resources, send error, insert error).
 */
mpls_return_enum ldp_label_request_send(ldp_global * g, ldp_session * s,
  ldp_attr * us_attr, ldp_attr ** ds_attr)
{
  ldp_attr *ds_temp;
  mpls_fec fec;

  LDP_ENTER(g->user_data, "ldp_label_request_send");

  MPLS_ASSERT(ds_attr && *ds_attr);

  fec_tlv2mpls_fec(&((*ds_attr)->fecTlv), 0, &fec);

  /* SLRq.1: is a request for this FEC already outstanding on this session? */
  if ((ds_temp = ldp_attr_find_downstream_state(g, s, &fec,
        LDP_LSP_STATE_REQ_SENT)) != NULL) { /* SLRq.1 */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_LABEL,
      "Label Request Send: request already pending(%d)\n", ds_temp->index);
    ldp_attr_add_us2ds(us_attr, ds_temp);
    /* we do not need the one passed in, but make sure that the caller is
       using this one from here forth */
    ldp_attr_remove_complete(g, *ds_attr, MPLS_BOOL_TRUE);
    *ds_attr = ds_temp;
    return MPLS_SUCCESS;
  }

  /* SLRq.2: peer has told us it is out of label resources */
  if (s->no_label_resource_recv == MPLS_BOOL_TRUE) { /* SLRq.2 */
    goto ldp_label_request_send_error;
  }

  (*ds_attr)->msg_id = g->message_identifier++;
  ldp_label_request_prepare_msg(s->tx_message, (*ds_attr)->msg_id, *ds_attr);

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_LABEL,
    "Label Request Sent: session(%d)\n", s->index);

  if (ldp_mesg_send_tcp(g, s, s->tx_message) == MPLS_FAILURE) { /* SLRq.3 */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_ERROR,
      "Label Request send failed\n");
    goto ldp_label_request_send_error;
  }

  /* SLRq.4: record the outstanding request in the downstream tree */
  (*ds_attr)->state = LDP_LSP_STATE_REQ_SENT;
  if (ldp_attr_insert_downstream(g, s, (*ds_attr)) == MPLS_FAILURE) { /* SLRq.4 */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_ERROR,
      "Couldn't insert sent attributes in tree\n");
    goto ldp_label_request_send_error;
  }

  if (us_attr) {
    ldp_attr_add_us2ds(us_attr, *ds_attr);
  }

  LDP_EXIT(g->user_data, "ldp_label_request_send");
  return MPLS_SUCCESS; /* SLRq.5 */

ldp_label_request_send_error:

  /* SLRq.6: remember we could not send, so a later retry is possible */
  LDP_PRINT(g->user_data, "SLRq.6\n");
  (*ds_attr)->state = LDP_LSP_STATE_NO_LABEL_RESOURCE_SENT;
  ldp_attr_insert_downstream(g, s, (*ds_attr)); /* SLRq.6 */

  LDP_EXIT(g->user_data, "ldp_label_request_send-error");
  return MPLS_FAILURE; /* SLRq.7 */
}
/*
 * Process a received Label Request (RFC 3036 A.1.2, steps LRq.1-13).
 *
 * s:       session the request arrived on
 * us_attr: the received (upstream) attributes
 * f:       FEC the request names, or NULL when we have no route for it
 *
 * Returns MPLS_SUCCESS when handled (including the "send notification and
 * ignore" cases), MPLS_FAILURE on internal errors.
 */
mpls_return_enum ldp_label_request_process(ldp_global * g, ldp_session * s,
  ldp_adj * a, ldp_entity * e, ldp_attr * us_attr, ldp_fec * f)
{
  ldp_session *nh_session = NULL;
  ldp_nexthop *nh = NULL;
  ldp_attr_list *us_list = NULL;
  mpls_bool egress = MPLS_BOOL_FALSE;
  ldp_attr *ds_attr = NULL;
  ldp_attr *us_temp = NULL;

  /* LRq.1: loop-detection / attribute sanity check */
  if (Check_Received_Attributes(g, s, us_attr, MPLS_LBLREQ_MSGTYPE) !=
    MPLS_SUCCESS) { /* LRp.1 */
    goto LRq_13;
  }

  /* no route for this FEC: tell the peer and stop (LRq.5) */
  if (f == NULL) {
    ldp_notif_send(g, s, us_attr, LDP_NOTIF_NO_ROUTE); /* LRq.5 */
    goto LRq_13;
  }

  /* just find one valid nexthop session for now */
  nh = MPLS_LIST_HEAD(&f->nh_root);
  while (nh) {
    nh_session = ldp_session_for_nexthop(nh);
    if (nh_session) {
      break;
    }
    nh = MPLS_LIST_NEXT(&f->nh_root, nh, _fec);
  }

  /* no downstream LDP session at all -> we act as egress for this FEC */
  if (!nh_session) {
    egress = MPLS_BOOL_TRUE;
  }

  /* LRq.3/4: requester is itself our nexthop -> would loop */
  if (nh_session != NULL && s->index == nh_session->index) { /* LRq.3 */
    ldp_notif_send(g, s, us_attr, LDP_NOTIF_LOOP_DETECTED); /* LRq.4 */
    goto LRq_13;
  }

  /* LRq.6/7: duplicate request (same msg_id, still pending) is ignored */
  if ((us_list = ldp_attr_find_upstream_all2(g, s, f)) != NULL) {
    us_temp = MPLS_LIST_HEAD(us_list);
    while (us_temp != NULL) {
      if (us_temp->state == LDP_LSP_STATE_REQ_RECV && /* LRq.6 */
        us_temp->msg_id == us_attr->msg_id) { /* LRq.7 */
        goto LRq_13;
      }
      us_temp = MPLS_LIST_NEXT(us_list, us_temp, _fs);
    }
  }

  /* LRq.8: record the received request */
  us_attr->state = LDP_LSP_STATE_REQ_RECV; /* LRq.8 */
  if (ldp_attr_insert_upstream2(g, s, us_attr, f) != MPLS_SUCCESS) {
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_ERROR,
      "Couldn't insert recv attributes in tree\n");
    goto ldp_label_request_process_error;
  }

  /* do we already hold a mapping from the downstream nexthop? */
  if (nh_session) {
    ds_attr = ldp_attr_find_downstream_state2(g, nh_session, f,
      LDP_LSP_STATE_MAP_RECV);
  } else {
    ds_attr = NULL;
  }

  if (g->lsp_control_mode == LDP_CONTROL_INDEPENDENT) { /* LRq.9 */
    /* independent control: answer immediately with a mapping */
    if (ldp_label_mapping_with_xc(g, s, f, &us_attr, ds_attr) !=
      MPLS_SUCCESS) {
      goto ldp_label_request_process_error;
    }
    if (egress == MPLS_BOOL_TRUE || ds_attr) {
      goto LRq_11;
    }
    /* not egress and no downstream mapping yet: fall through to LRq_10
     * to request a label from the nexthop */
  } else {
    /* ordered control: can only map if egress, or merging onto an
     * existing downstream mapping */
    if ((!(egress == MPLS_BOOL_TRUE || ds_attr)) ||
      (g->label_merge == MPLS_BOOL_FALSE)) {
      goto LRq_10;
    }
    if (ldp_label_mapping_with_xc(g, s, f, &us_attr, ds_attr) !=
      MPLS_SUCCESS) {
      goto ldp_label_request_process_error;
    }
    goto LRq_11;
  }

LRq_10:
  /* propagate the request downstream and cross-connect it */
  ds_attr = NULL;
  if (ldp_label_request_for_xc(g, nh_session, &f->info, us_attr, &ds_attr) !=
    MPLS_SUCCESS) {
    goto ldp_label_request_process_error;
  }

LRq_11:
  /* the work done by LRq_11 is handled in ldp_label_mapping_with_xc() */
LRq_13:
  /* drop a downstream attr that never made it into the tree */
  if (ds_attr != NULL && ds_attr->in_tree == MPLS_BOOL_FALSE) {
    ldp_attr_remove_complete(g, ds_attr, MPLS_BOOL_FALSE);
  }
  return MPLS_SUCCESS;

ldp_label_request_process_error:
  return MPLS_FAILURE;
}
/*
 * One-shot timer callback fired after a session reaches OPERATIONAL in
 * downstream-on-demand mode: walk every FEC/nexthop, and for each FEC whose
 * nexthop resolves to this session (and passes export policy), send an
 * initial Label Request (cross-connected to any upstream state we already
 * hold, or to nothing).
 *
 * extra:  the session (holds a reference that this callback releases when
 *         the walk completes)
 * handle: the ldp_global
 *
 * Fix: the original wrote `s->initial_distribution_timer = 0` AFTER
 * MPLS_REFCNT_RELEASE(s, ldp_session_delete), which may free `s` when it
 * drops the last reference — a use-after-free. The field is now cleared
 * before the reference is released.
 */
void ldp_label_request_initial_callback(mpls_timer_handle timer, void *extra,
  mpls_cfg_handle handle)
{
  ldp_session *s = (ldp_session *)extra;
  ldp_global *g = (ldp_global*)handle;
  ldp_nexthop *nh = NULL;
  ldp_fec *f = NULL;
  ldp_session *nh_session = NULL;
  mpls_bool done = MPLS_BOOL_FALSE;
  ldp_attr *attr = NULL;
  ldp_fs *fs = NULL;
  ldp_attr *ds_attr = NULL;

  LDP_ENTER(g->user_data, "ldp_label_request_initial_callback");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_TIMER,
    "Initial Label Request Callback fired: session(%d)\n", s->index);

  mpls_lock_get(g->global_lock);
  mpls_timer_stop(g->timer_handle, timer);

  if ((f = MPLS_LIST_HEAD(&g->fec))) {
    do {
      if ((nh = MPLS_LIST_HEAD(&f->nh_root))) {
        do {
          /* trace the FEC being considered */
          switch (f->info.type) {
            case MPLS_FEC_PREFIX:
              LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
                LDP_TRACE_FLAG_ROUTE, "Processing prefix FEC: %08x/%d ",
                f->info.u.prefix.network.u.ipv4, f->info.u.prefix.length);
              break;
            case MPLS_FEC_HOST:
              LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
                LDP_TRACE_FLAG_ROUTE, "Processing host FEC: %08x ",
                f->info.u.host.u.ipv4);
              break;
            case MPLS_FEC_L2CC:
              LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
                LDP_TRACE_FLAG_ROUTE, "Processing L2CC FEC: %d %d %d ",
                f->info.u.l2cc.connection_id, f->info.u.l2cc.group_id,
                f->info.u.l2cc.type);
              break;
            default:
              MPLS_ASSERT(0);
          }

          if (nh->info.type & MPLS_NH_IP) {
            LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
              LDP_TRACE_FLAG_ROUTE, "via %08x\n", nh->addr->address.u.ipv4);
          }
          if (nh->info.type & MPLS_NH_IF) {
            LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
              LDP_TRACE_FLAG_ROUTE, "via %p\n", nh->iff->handle);
          }

          /* check to see if export policy allows us to 'see' this route */
          if (mpls_policy_export_check(g->user_data, &f->info, &nh->info) ==
            MPLS_BOOL_FALSE) {
            LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
              LDP_TRACE_FLAG_DEBUG, "Rejected by export policy\n");
            continue;
          }

          /* find the next hop session corresponding to this FEC */
          nh_session = ldp_session_for_nexthop(nh);

          /* do we have a valid next hop session, and is the next hop
           * session this session? */
          if ((!nh_session) || (nh_session->index != s->index)) {
            continue;
          }

          /* have we already sent a label request to this peer for this FEC? */
          if (ldp_attr_find_downstream_state(g, s, &f->info,
            LDP_LSP_STATE_REQ_SENT)) {
            continue;
          }

          /* clear out info from the last FEC */
          ds_attr = NULL;

          /* jleu: duplicate code from ldp_attr_find_upstream_state_any */
          fs = MPLS_LIST_HEAD(&f->fs_root_us);
          while (fs) {
            attr = MPLS_LIST_HEAD(&fs->attr_root);
            while (attr) {
              if (attr->state == LDP_LSP_STATE_REQ_RECV ||
                attr->state == LDP_LSP_STATE_MAP_SENT) {
                if (!ds_attr) {
                  /* this is not neccessarily going to be XC'd to something */
                  ldp_label_request_for_xc(g, s, &f->info, attr, &ds_attr);
                }
              }
              attr = MPLS_LIST_NEXT(&fs->attr_root, attr, _fs);
            }
            fs = MPLS_LIST_NEXT(&f->fs_root_us, fs, _fec);
          }

          if (!ds_attr) {
            /*
             * we did not find any received requests or sent mappings so
             * send a request and xc it to nothing
             */
            ldp_label_request_for_xc(g, s, &f->info, NULL, &ds_attr);
          }
        } while ((nh = MPLS_LIST_NEXT(&f->nh_root, nh, _fec)));
      }
    } while ((f = MPLS_LIST_NEXT(&g->fec, f, _global)));
    done = MPLS_BOOL_TRUE;
  }

  if (done == MPLS_BOOL_TRUE) {
    mpls_timer_delete(g->timer_handle, timer);
    /* clear the session's timer handle BEFORE dropping the reference:
     * the release may free `s` */
    s->initial_distribution_timer = (mpls_timer_handle) 0;
    MPLS_REFCNT_RELEASE(s, ldp_session_delete);
  } else {
    mpls_timer_start(g->timer_handle, timer, MPLS_TIMER_ONESHOT);
    /* need to mark the session with where it left off */
  }
  mpls_lock_release(g->global_lock);

  LDP_EXIT(g->user_data, "ldp_label_request_initial_callback");
}
/*
 * Top-level event entry point called by the porting layer for socket
 * events (TCP/UDP data, TCP listen/connect, close). Demultiplexes the
 * event, drives the FSM, and escalates FSM failures into a session CLOSE.
 *
 * Returns MPLS_SUCCESS, or MPLS_FATAL when LDP itself must shut down.
 *
 * NOTE(review): `from` is only filled in by the TCP_LISTEN/data paths;
 * on the TCP_CONNECT and CLOSE paths it is passed to the state machine
 * uninitialized — presumably unused there, but confirm.
 */
mpls_return_enum ldp_event(mpls_cfg_handle handle, mpls_socket_handle socket,
  void *extra, ldp_event_enum event)
{
  mpls_return_enum retval = MPLS_SUCCESS;
  ldp_global *g = (ldp_global*)handle;
  mpls_socket_handle socket_new = (mpls_socket_handle)0;
  ldp_session *session = NULL;
  ldp_entity *entity = NULL;
  ldp_adj *adj = NULL;
  uint8_t buffer[MPLS_PDUMAXLEN];
  mpls_dest from;
  ldp_mesg mesg;
  ldp_buf buf;

  LDP_ENTER(g->user_data, "ldp_event");

  mpls_lock_get(g->global_lock);

  switch (event) {
    case LDP_EVENT_TCP_DATA:
    case LDP_EVENT_UDP_DATA:
    {
      mpls_bool more;

      /* fresh receive buffer on the stack for this event */
      buf.current = buffer;
      buf.buffer = buffer;
      buf.total = MPLS_PDUMAXLEN;
      buf.size = 0;
      buf.current_size = 0;
      buf.want = 0;

      /* do this so a failure will know which session caused it */
      if (event == LDP_EVENT_TCP_DATA) {
        session = extra;
      }

      /* drain the socket: keep processing PDUs until no more data */
      do {
        retval = ldp_buf_process(g, socket, &buf, extra, event, &from, &more);
      } while (retval == MPLS_SUCCESS && more == MPLS_BOOL_TRUE);
      break;
    }
    case LDP_EVENT_TCP_LISTEN:
    {
      /* inbound connection: accept and create a passive session */
      socket_new = mpls_socket_tcp_accept(g->socket_handle, socket, &from);
      if (mpls_socket_handle_verify(g->socket_handle, socket_new) ==
        MPLS_BOOL_FALSE) {
        LDP_PRINT(g->user_data, "Failed accepting socket\n");
        retval = MPLS_FAILURE;
      } else if (!(session = ldp_session_create_passive(g, socket_new,
        &from))) {
        mpls_socket_close(g->socket_handle, socket_new);
        LDP_PRINT(g->user_data, "Failure creating passive session\n");
        retval = MPLS_FATAL;
      } else {
        retval = ldp_state_machine(g, session, NULL, NULL,
          LDP_EVENT_CONNECT, &mesg, &from);
      }
      break;
    }
    case LDP_EVENT_TCP_CONNECT:
    {
      /* completion of a non-blocking active connect */
      retval = mpls_socket_connect_status(g->socket_handle, socket);
      session = (ldp_session *)extra;
      if (retval == MPLS_SUCCESS) {
        /* only get this case if we did a non-block connect */
        mpls_socket_writelist_del(g->socket_handle, socket);
        retval = ldp_state_machine(g, session, NULL, NULL,
          LDP_EVENT_CONNECT, &mesg, &from);
      } else if (retval != MPLS_NON_BLOCKING) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ERROR, "ldp_event: LDP_EVENT_TCP_CONNECT errno = %d\n",
          mpls_socket_get_errno(g->socket_handle, socket));
      } else {
        /* non-blocking connect is still blocking, we'll try again in a bit */
        retval = MPLS_SUCCESS;
      }
      break;
    }
    case LDP_EVENT_CLOSE:
    {
      /* session/adj/entity are all NULL here; the FSM resolves context */
      retval = ldp_state_machine(g, session, adj, entity, LDP_EVENT_CLOSE,
        &mesg, &from);
      break;
    }
    default:
    {
      MPLS_ASSERT(0);
    }
  }

  /* ldp_state_machine return MPLS_SUCCESS when it has handled the event
     to completion. If the handling off the event results in the session
     needing to be shutdown MPLS_FAILURE is returned. If the handling of
     the event requires the LDP be shutdown LD_FATAL is returned, and
     passed back to the user. other values are invalid */
  switch (retval) {
    case MPLS_FAILURE:
    {
      /* if shutting down the session results in LDP_FATAL, then pass it
       * back to the user */
      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_ERROR,
        "ldp_event: FAILURE executing a CLOSE\n");
      retval = ldp_state_machine(g, session, adj, entity, LDP_EVENT_CLOSE,
        NULL, &from);
      if (retval == MPLS_FATAL) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ERROR,
          "ldp_event: CLOSE failed: FATAL propogated to the environemnt\n");
      }
      break;
    }
    case MPLS_FATAL:
    {
      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_ERROR,
        "ldp_event: FATAL propogated to the environemnt\n");
      break;
    }
    case MPLS_SUCCESS:
    {
      break;
    }
    default:
    {
      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_ERROR,
        "ldp_event: invalid return value of %d\n", retval);
      break;
    }
  }

  mpls_lock_release(g->global_lock);

  LDP_EXIT(g->user_data, "ldp_event");

  return retval;
}
/*
 * Read from the socket into "buf", reassemble one LDP PDU (header +
 * messages), and dispatch each contained message to the state machine.
 *
 * Handles partial reads (loops back to read_again until a full PDU is
 * buffered) and multiple PDUs per read (loops back to decode_again with
 * the leftover bytes shifted to the front of the buffer).
 *
 * more (out): MPLS_BOOL_TRUE when the caller should invoke us again
 *             because more data may be pending.
 *
 * Returns MPLS_SUCCESS, or MPLS_FAILURE on read/decode/FSM errors (the
 * session's shutdown_notif is set so the caller's CLOSE can report why).
 */
mpls_return_enum ldp_buf_process(ldp_global * g, mpls_socket_handle socket,
  ldp_buf * buf, void *extra, ldp_event_enum event, mpls_dest * from,
  mpls_bool * more)
{
  mpls_return_enum retval = MPLS_SUCCESS;
  ldp_session *session = NULL;
  ldp_entity *entity = NULL;
  ldp_adj *adj = NULL;
  ldp_mesg mesg;
  int size = 0;

  LDP_ENTER(g->user_data, "ldp_buf_process");

  *more = MPLS_BOOL_TRUE;

  memset(&mesg, 0, sizeof(mesg));

  /* first call for this PDU: we want at least a full LDP header */
  if (!buf->want) {
    buf->want = MPLS_LDP_HDRSIZE;
  }

read_again:

  switch (event) {
    case LDP_EVENT_TCP_DATA:
    {
      session = (ldp_session *) extra;
      MPLS_ASSERT(session);
      session->mesg_rx++;

      /* TCP: read only up to the bytes still needed for this PDU */
      size = mpls_socket_tcp_read(g->socket_handle, socket,
        buf->buffer + buf->size, buf->want - buf->size);
      if (!size) {
        /* zero read == peer closed the connection: fatal for the session */
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ERROR, "ldp_event: LDP_EVENT_TCP_DATA errno = %d\n",
          mpls_socket_get_errno(g->socket_handle, socket));
        retval = MPLS_FAILURE;
        session->shutdown_notif = LDP_NOTIF_SHUTDOWN;
        session->shutdown_fatal = MPLS_BOOL_TRUE;
        goto ldp_event_end;
      }
      if (size < 0) {
        /* would-block: nothing more to do now */
        retval = MPLS_SUCCESS;
        *more = MPLS_BOOL_FALSE;
        goto ldp_event_end;
      }
      break;
    }
    case LDP_EVENT_UDP_DATA:
    {
      /* UDP: one datagram, also capture the sender into *from */
      size = mpls_socket_udp_recvfrom(g->socket_handle, socket,
        buf->buffer + buf->size, buf->total - buf->size, from);
      if (!size) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ERROR, "ldp_event: LDP_EVENT_UDP_DATA errno = %d\n",
          mpls_socket_get_errno(g->socket_handle, socket));
        retval = MPLS_FAILURE;
        goto ldp_event_end;
      }
      if (size < 0) {
        retval = MPLS_SUCCESS;
        *more = MPLS_BOOL_FALSE;
        goto ldp_event_end;
      }
      break;
    }
    default:
    {
      MPLS_ASSERT(0);
      break;
    }
  }

  buf->current_size += size;
  buf->size += size;

decode_again:

  /* still short of a complete header/PDU: wait for more data */
  if (buf->size < buf->want) {
    retval = MPLS_SUCCESS;
    *more = MPLS_BOOL_FALSE;
    goto ldp_event_end;
  }

  /* upon succesful decode the pduLength will be non 0 */
  if (!mesg.header.pduLength) {
    if (ldp_decode_header(g, buf, &mesg) != MPLS_SUCCESS) {
      retval = MPLS_FAILURE;
      if (session) {
        session->shutdown_notif = LDP_NOTIF_BAD_MESG_LEN;
      }
      goto ldp_event_end;
    }

    /* -buf->size is already 10 (the size of the full header
     * -pduLength include 6 bytes of the header
     *
     * therefore add 4 so we can compare buf->want to buf->size and
     * not have to adjust */
    buf->want = mesg.header.pduLength + 4;
    if (buf->size < buf->want) {
      goto read_again;
    }
    if (buf->size > buf->want) {
      /* extra bytes belong to the next PDU; limit this decode pass */
      buf->current_size = buf->want - MPLS_LDP_HDRSIZE;
    }
  }

  /* decode and dispatch every message in this PDU */
  do {
    if (ldp_decode_one_mesg(g, buf, &mesg) != MPLS_SUCCESS) {
      retval = MPLS_FAILURE;
      if (session) {
        session->shutdown_notif = LDP_NOTIF_BAD_MESG_LEN;
      }
      goto ldp_event_end_loop;
    }

    /* map the LDP message type onto an FSM event and resolve context */
    switch (ldp_mesg_get_type(&mesg)) {
      case MPLS_HELLO_MSGTYPE:
      {
        mpls_oper_state_enum oper_state = MPLS_OPER_DOWN;
        mpls_inet_addr addr;
        int labelspace = 0;
        int targeted;

        event = LDP_EVENT_HELLO;

        targeted = 0;
        ldp_mesg_hello_get_targeted(&mesg, &targeted);
        ldp_mesg_hdr_get_lsraddr(&mesg, &addr);
        ldp_mesg_hdr_get_labelspace(&mesg, &labelspace);

        if (targeted) {
          /* targeted hello: look up the configured peer by LSR address */
          ldp_peer *peer = NULL;
          if ((peer = ldp_global_find_peer_addr(g, &addr))) {
            entity = ldp_peer_get_entity(peer);
            oper_state = peer->oper_state;
          }
        } else {
          /* link hello: look up the entity via the receiving interface */
          ldp_if *iff = NULL;
          if ((iff = ldp_global_find_if_handle(g, from->if_handle))) {
            entity = ldp_if_get_entity(iff);
            oper_state = iff->oper_state;
          }
        }

        if (!entity) {
          /* No entity! No choice but to ignore this packet */
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_NORMAL, "ldp_event: unknown entity\n");
          goto ldp_event_end_loop;
        } else if (entity->admin_state == MPLS_ADMIN_DISABLE) {
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_NORMAL, "ldp_event: entity is disabled\n");
          goto ldp_event_end_loop;
        } else if (oper_state == MPLS_OPER_DOWN) {
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_NORMAL, "ldp_event: entity is down\n");
          goto ldp_event_end_loop;
        }

        if ((adj = ldp_entity_find_adj(entity, &mesg))) {
          session = adj->session;
        } else {
          session = NULL;
        }
        /* if we don't have an adj one will be create by state machine */
        break;
      }
      case MPLS_INIT_MSGTYPE:
      {
        event = LDP_EVENT_INIT;
        break;
      }
      case MPLS_NOT_MSGTYPE:
      {
        event = LDP_EVENT_NOTIF;
        break;
      }
      case MPLS_KEEPAL_MSGTYPE:
      {
        event = LDP_EVENT_KEEP;
        break;
      }
      case MPLS_LBLWITH_MSGTYPE:
      case MPLS_LBLREL_MSGTYPE:
      case MPLS_LBLREQ_MSGTYPE:
      case MPLS_LBLMAP_MSGTYPE:
      case MPLS_LBLABORT_MSGTYPE:
      {
        event = LDP_EVENT_LABEL;
        break;
      }
      case MPLS_ADDR_MSGTYPE:
      case MPLS_ADDRWITH_MSGTYPE:
      {
        event = LDP_EVENT_ADDR;
        break;
      }
      default:
      {
        MPLS_ASSERT(0);
      }
    }

    retval = ldp_state_machine(g, session, adj, entity, event, &mesg, from);

  ldp_event_end_loop:
    if (retval != MPLS_SUCCESS) {
      break;
    }
  } while ((buf->current_size > 0) && (*more == MPLS_BOOL_TRUE));

  /* shift any bytes of the next PDU to the front of the buffer */
  if (buf->want < buf->size) {
    buf->current_size = buf->size - buf->want;
    buf->size = buf->current_size;
    memmove(buf->buffer, buf->current, buf->current_size);
  } else {
    buf->size = 0;
  }
  buf->current = buf->buffer;
  memset(&mesg, 0, sizeof(mesg));
  buf->want = MPLS_LDP_HDRSIZE;
  if (buf->current_size) {
    goto decode_again;
  }

ldp_event_end:

  LDP_EXIT(g->user_data, "ldp_buf_process");

  return retval;
}
/*
 * Process a received Label Mapping (RFC 3036 A.1.3, "Receive_Label_Map",
 * steps LMp.1-33): loop detection, duplicate/update detection, import
 * policy, outlabel installation, optional ingress FTN install, and
 * propagation of (possibly updated) mappings to upstream peers.
 *
 * s:      session the mapping arrived on
 * r_attr: the received downstream attributes (not yet in any tree)
 * f:      the FEC the mapping is for
 *
 * Returns MPLS_SUCCESS or MPLS_FAILURE.
 */
mpls_return_enum ldp_label_mapping_process(ldp_global * g, ldp_session * s,
  ldp_adj * a, ldp_entity * e, ldp_attr * r_attr, ldp_fec * f)
{
  mpls_return_enum retval = MPLS_SUCCESS;
  ldp_session *peer = NULL;
  ldp_attr_list *us_list = NULL;
  ldp_attr_list *ds_list = NULL;
  ldp_attr *ds_attr = NULL;
  ldp_attr *ds_temp = NULL;
  ldp_attr *us_attr = NULL;
  ldp_attr *us_temp = NULL;
  ldp_attr dumb_attr;
  ldp_nexthop *nh = NULL;

  ldp_outlabel *out = NULL;
  mpls_bool requested = MPLS_BOOL_FALSE;
  ldp_attr *existing = NULL;
  mpls_bool need_request = MPLS_BOOL_FALSE;

  LDP_ENTER(g->user_data, "ldp_label_mapping_process");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
    "Label Mapping Recv from %s for %08x/%d\n", s->session_name,
    r_attr->fecTlv.fecElArray[0].addressEl.address,
    r_attr->fecTlv.fecElArray[0].addressEl.preLen);

  /* LMp.1: was this mapping an answer to a request we sent? */
  if ((ds_attr = ldp_attr_find_downstream_state2(g, s, f,
    LDP_LSP_STATE_REQ_SENT)) != NULL) { /* LMp.1 */
    /* just remove the req from the tree, we will use the r_attr sent to us */
    ldp_attr_delete_downstream(g, s, ds_attr);
    requested = MPLS_BOOL_TRUE;
  } else {
    requested = MPLS_BOOL_FALSE;
  }

  ds_attr = r_attr;
  ds_attr->state = LDP_LSP_STATE_MAP_RECV; /* LMp.2 */

  /*
   * ds_attr is the mapping we will keep and is NOT in the tree, unless
   * it is an update mapping ...
   */

  /* LMp.3: loop detection; SUCCESS means no loop */
  if (Check_Received_Attributes(g, s, ds_attr, MPLS_LBLMAP_MSGTYPE) ==
    MPLS_SUCCESS) { /* LMp.3 */
    goto LMp_9;
  }

  /*
   * A loop was detected
   */
  if ((ds_list = ldp_attr_find_downstream_all2(g, s, f))) {
    ds_temp = MPLS_LIST_HEAD(ds_list);
    /*
     * check all the labels this session has received from "s" for "fec"
     * do we have a duplicat?
     */
    while (ds_temp) {
      if ((ds_temp->state == LDP_LSP_STATE_MAP_RECV) && /* LMp.4 */
        ldp_attr_is_equal(ds_temp, ds_attr, LDP_ATTR_LABEL) == /* LMp.5 */
        MPLS_BOOL_TRUE) {
        /* remove record of the label and remove it switching */
        ldp_attr_remove_complete(g, ds_temp, MPLS_BOOL_TRUE); /* LMp.6,7 */
        /*
         * I think this is supposed to be 32 NOT 33, we need to release
         * it don't we?
         */
        goto LMp_33;
      }
      ds_temp = MPLS_LIST_NEXT(ds_list, ds_temp, _fs);
    }
  }

  /* LMp.8: new looping mapping -> release it back to the sender */
  LDP_PRINT(g->user_data, "Receive_Label_Map_8: send release");
  if (ldp_label_release_send(g, s, ds_attr, LDP_NOTIF_LOOP_DETECTED) !=
    MPLS_SUCCESS) { /* LMp.8 */
    retval = MPLS_FAILURE;
  }
  goto LMp_33;

LMp_9:
  /*
   * No Loop Detected
   */
  ds_temp = ldp_attr_find_downstream_state2(g, s, f,
    LDP_LSP_STATE_MAP_RECV);
  if (requested == MPLS_BOOL_TRUE ||
    g->label_merge == MPLS_BOOL_FALSE || !ds_temp) {
    /* !merging then this is always a new LSP
     * merging w/o a recv'd mapping is a new LSP
     * this check comes from Note 6
     */
    goto LMp_11;
  }

  /* searching all recv'd attrs for matched mappings,
   * stop after finding 1st match
   */
  if ((ds_list = ldp_attr_find_downstream_all2(g, s, f))) {
    ds_temp = MPLS_LIST_HEAD(ds_list);
    while (ds_temp) {
      if (ds_temp->state == LDP_LSP_STATE_MAP_RECV) { /* LMp.9 */
        if (ldp_attr_is_equal(ds_attr, ds_temp, LDP_ATTR_LABEL) ==
          MPLS_BOOL_TRUE) { /* LMp.10 */
          /*
           * this mapping matches an existing mapping, but it
           * could contain updated attributes
           */
          existing = ds_temp;
          break;
        } else {
          /*
           * we have been given another label for the same FEC and we
           * didn't request it, release it
           */
          LDP_PRINT(g->user_data, "LMp.10 dup without req\n");
          goto LMp_32;
        }
      }
      ds_temp = MPLS_LIST_NEXT(ds_list, ds_temp, _fs);
    }
  }

  if (existing) {
    /* merge updated attrs into the mapping we already hold */
    ldp_attr2ldp_attr(ds_attr, existing, LDP_ATTR_HOPCOUNT | LDP_ATTR_PATH |
      LDP_ATTR_MSGID | LDP_ATTR_LSPID | LDP_ATTR_TRAFFIC);
    ds_attr = existing;
    /*
     * no need to free ds_attr, since it was not added to the tree it
     * will be deleted when we exit ldp_label_mapping_process(), see
     * ldp_state_process().
     */
  }
  /*
   * from this point on.... if this is an updated mapping then ds_attr
   * is the existing mapping which has now been update, else ds_attr
   * is the new mapping
   */

LMp_11:
  /*
   * existing ONLY has a value for updated label mapping
   */
  nh = ldp_nexthop_for_fec_session(f,s); /* LMp.11 */

  /*
   * the following departs from the procedure, it allows for filtering
   * of label mappings
   *
   * Are we configured to accept and INSTALL this mapping?
   *
   * NOTE(review): nh is dereferenced here (&nh->info) BEFORE the
   * `if (!nh)` check below — if ldp_nexthop_for_fec_session() can return
   * NULL (the later check implies it can), this is a NULL dereference.
   * The policy check likely belongs after the !nh branch.
   */
  if (mpls_policy_import_check(g->user_data, &f->info, &nh->info) ==
    MPLS_BOOL_FALSE) {
    /*
     * policy has rejected it, store it away
     */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
      "Label Mapping for %08x/%d from %s filtered by import policy\n",
      r_attr->fecTlv.fecElArray[0].addressEl.address,
      r_attr->fecTlv.fecElArray[0].addressEl.preLen, s->session_name);
    if (existing) {
      ds_attr->filtered = MPLS_BOOL_TRUE;
      if (ds_attr->outlabel && ds_attr->outlabel->switching ==
        MPLS_BOOL_TRUE) {
        /* the mapping has been filtered, but the original wasn't? */
        MPLS_ASSERT(0);
      }
    } else {
      ds_attr->filtered = MPLS_BOOL_TRUE;
      if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) {
        retval = MPLS_FAILURE;
      }
    }
    goto LMp_33;
  }

  if (!nh) { /* LMp.12 */
    /*
     * if we did not find a nh hop for this FEC that corresponded to the
     * MsgSource then the MsgSource is not a nexthop for the FEC
     */
    if (g->label_retention_mode == LDP_RETENTION_CONSERVATIVE) { /* LMp.13C */
      LDP_PRINT(g->user_data, "LMp.13C conservative\n");
      goto LMp_32;
    }

    /*
     * store it away
     */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
      "Session %s is not a valid nexthop for %08x/%d\n", s->session_name,
      r_attr->fecTlv.fecElArray[0].addressEl.address,
      r_attr->fecTlv.fecElArray[0].addressEl.preLen);
    if (!existing) { /* LMp.13L */
      if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) {
        retval = MPLS_FAILURE;
      }
    }
    goto LMp_33;
  }

  /*
   * this is slightly different form the procedure, we can still be
   * transit for a FEC we are not configured to be ingress for.
   * Either way we only need to do the "install for fwd/switching"
   * only once. We could arrive here multiple times due to updates,
   * only install it the first time
   */
  if ((!existing) || (!existing->outlabel)) {
    /*
     * we haven't installed it yet.
     * Either new (!existing), or a result of a "Detect FEC Nexthop Change"
     * and we had this mapping in our database (!existing->outlabel))
     */
    if (!(out = ldp_outlabel_create_complete(g, s, ds_attr, nh))) {
      LDP_PRINT(g->user_data, "LMp.15 failure creating outlabel\n");
      goto LMp_32;
    }
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_BINDING,
      "Out Label Added\n");
  }

  /*
   * are we configured to act as ingress for this FEC?
   */
  if (mpls_policy_ingress_check(g->user_data, &f->info, &nh->info) ==
    MPLS_BOOL_TRUE) { /* LMp.14 */
    /*
     * yep, bind the label to the FEC
     */
    if (ds_attr->ingress != MPLS_BOOL_TRUE) {
#if MPLS_USE_LSR
      lsr_ftn ftn;
      ftn.outsegment_index = ds_attr->outlabel->info.handle;
      memcpy(&ftn.fec, &f->info, sizeof(mpls_fec));
      lsr_cfg_ftn_set2(g->lsr_handle, &ftn, LSR_CFG_ADD|LSR_FTN_CFG_FEC|
        LSR_FTN_CFG_OUTSEGMENT);
#else
      mpls_mpls_fec2out_add(g->mpls_handle, &f->info,
        &ds_attr->outlabel->info);
#endif
      ds_attr->ingress = MPLS_BOOL_TRUE;
      ds_attr->outlabel->merge_count++;
      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV,
        LDP_TRACE_FLAG_BINDING, "Acting as ingress for %08x/%d from %s\n",
        r_attr->fecTlv.fecElArray[0].addressEl.address,
        r_attr->fecTlv.fecElArray[0].addressEl.preLen, s->session_name);
    }
  }

  /* create a set of attrs that we will fill and compare against
   * if this mapping were to be propogate these are the attrs it would have
   * by comparing what we did sent in the past to these, we con figure out
   * if we need to send an updated mapping
   */
  memset(&dumb_attr, 0, sizeof(ldp_attr));
  mpls_fec2fec_tlv(&f->info, &dumb_attr.fecTlv, 0);
  dumb_attr.fecTlvExists = 1;
  dumb_attr.fecTlv.numberFecElements = 1;

  /*
   * by definition (we received a label mapping that will be used) this
   * LSR is _not_ the egress, so calculate a hop and path based on the
   * mapping we received. We will compare this with mapping that have
   * already been sent. If they differ, we will send an updated mapping
   */
  Prepare_Label_Mapping_Attributes(g, s, &f->info, ds_attr, &dumb_attr,
    MPLS_BOOL_TRUE, MPLS_BOOL_TRUE, MPLS_BOOL_FALSE);

  if (!existing) {
    /*
     * this is the first time we've seen this mapping, add it to the
     * database. all future updates will modify this entry in place
     */
    /* LMp.16 */
    /* NOTE(review): debug printf leftover — should go through the trace
     * macros or be removed */
    printf("!!!LMp16!!!\n");
    if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) {
      retval = MPLS_FAILURE;
      goto LMp_33;
    }
  }

  peer = MPLS_LIST_HEAD(&g->session);
  while (peer) { /* LMp.17 */
    if (peer->state != LDP_STATE_OPERATIONAL) {
      goto next_peer;
    }

    /*
     * it is just as easy to walk the list of all upstream attr for this
     * peer as it is to the individual check to see if we have sent a
     * label mapping for this FEC LSP
     */
    /* NOTE(review): original marker here flagged this section as broken;
     * treat with care */

    /* LMp.22 - 27 */
    if ((us_list = ldp_attr_find_upstream_all2(g, peer, f))) { /* LMp.23 */
      us_temp = MPLS_LIST_HEAD(us_list);
      while (us_temp) {
        /*
         * if we have sent a label mapping for the FEC and that label
         * mapping was an done in independent mode or it is part of an
         * LSP created due as part of an existing received label mapping
         */
        /* LMp.18 */
        if (us_temp->state == LDP_LSP_STATE_MAP_SENT) {
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV,
            LDP_TRACE_FLAG_BINDING, "Already sent mapping for %08x/%d to %s\n",
            r_attr->fecTlv.fecElArray[0].addressEl.address,
            r_attr->fecTlv.fecElArray[0].addressEl.preLen,
            peer->session_name);
          if ((!existing) || (existing->index == us_temp->ds_attr->index)) {
            LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV,
              LDP_TRACE_FLAG_BINDING, "Part of same LSP\n");
            /* are the received attrs the same as the ones we've
             * already sent */
            if (ldp_attr_is_equal(us_temp, &dumb_attr,
              LDP_ATTR_HOPCOUNT | LDP_ATTR_PATH) != MPLS_BOOL_TRUE) {
              LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV,
                LDP_TRACE_FLAG_BINDING, "Propogating updated attrs\n");
              /* send an updated label mapping */
              if (ldp_label_mapping_with_xc(g, us_temp->session, f,
                &us_temp, ds_attr) != MPLS_SUCCESS) { /* LMp.24-26 */
                retval = MPLS_FAILURE;
                goto LMp_33;
              }
            }
          }
        }
        us_temp = MPLS_LIST_NEXT(us_list, us_temp, _fs);
      }
    }

    if ((peer->oper_distribution_mode == LDP_DISTRIBUTION_UNSOLICITED) &&
      (g->lsp_control_mode == LDP_CONTROL_ORDERED)) { /* LMp.19 */
      /*
       * if we're not merging and we have multiple ORDERED DU sessions,
       * we will to start requesting labels after we propogate the mapping
       * to the first peer
       */
      if (need_request == MPLS_BOOL_TRUE) {
        if (ldp_attr_find_downstream_state2(g, peer, f,
          LDP_LSP_STATE_REQ_SENT) == NULL) {
          /*
           * we don't have a request for FEC to peer outstanding, make one
           */
          ds_temp = NULL;
          if (ldp_label_request_for_xc(g, peer, &f->info, NULL, &ds_temp) !=
            MPLS_SUCCESS) {
            retval = MPLS_FAILURE;
            goto LMp_33;
          }
        }
      } else {
        /*
         * We're in DU more, either we're merging, or we're not merging and
         * this is the first peer we're propogating this mapping to
         */
        /* LMp.20-21,30 */
        us_attr = NULL;
        if (ldp_label_mapping_with_xc(g, peer, f, &us_attr, ds_attr) !=
          MPLS_SUCCESS) {
          retval = MPLS_FAILURE;
          goto LMp_33;
        }
        /*
         * if we're not merging, we will need to request a label for
         * the next DU peer
         */
        if (g->label_merge == MPLS_BOOL_FALSE) {
          need_request = MPLS_BOOL_TRUE;
        }
      }
    }

    /* LMp.28 */
    while ((us_temp = ldp_attr_find_upstream_state2(g, peer, f,
      LDP_LSP_STATE_REQ_RECV))) {
      if (peer->oper_distribution_mode == LDP_DISTRIBUTION_UNSOLICITED) {
        if (need_request == MPLS_BOOL_TRUE) {
          if (ldp_attr_find_downstream_state2(g, peer, f,
            LDP_LSP_STATE_REQ_SENT) == NULL) {
            /*
             * we don't have a request for FEC to peer outstanding
             */
            ds_temp = NULL;
            if (ldp_label_request_for_xc(g, peer, &f->info, us_temp,
              &ds_temp) != MPLS_SUCCESS) {
              retval = MPLS_FAILURE;
              goto LMp_33;
            }
          }
        } else {
          if (ldp_label_mapping_with_xc(g, peer, f, &us_temp, ds_attr) !=
            MPLS_SUCCESS) {
            retval = MPLS_FAILURE;
            goto LMp_33;
          }
        }
      } else {
        if ((us_list = ldp_attr_find_upstream_all2(g, peer, f))) {
          /* NOTE(review): this walks us_list with MPLS_LIST_NEXT below,
           * but takes the head from ds_list — almost certainly should be
           * MPLS_LIST_HEAD(us_list); confirm against upstream sources */
          us_temp = MPLS_LIST_HEAD(ds_list);
          while (us_temp) {
            if (us_temp->state == LDP_LSP_STATE_REQ_RECV) {
              if (need_request == MPLS_BOOL_TRUE) {
                if (ldp_attr_find_downstream_state2(g, peer, f,
                  LDP_LSP_STATE_REQ_SENT) == NULL) {
                  /*
                   * we don't have a request for FEC to peer outstanding
                   */
                  ds_temp = NULL;
                  if (ldp_label_request_for_xc(g, peer, &f->info, us_temp,
                    &ds_temp) != MPLS_SUCCESS)
                  {
                    retval = MPLS_FAILURE;
                    goto LMp_33;
                  }
                }
              } else {
                if (ldp_label_mapping_with_xc(g, peer, f, &us_temp,
                  ds_attr) != MPLS_SUCCESS) {
                  retval = MPLS_FAILURE;
                  goto LMp_33;
                }
                /*
                 * if we're not merging, we will need to request a label
                 * for the next DU peer
                 */
                if (g->label_merge == MPLS_BOOL_FALSE) {
                  need_request = MPLS_BOOL_TRUE;
                }
              }
            }
            us_temp = MPLS_LIST_NEXT(us_list, us_temp, _fs);
          }
        }
      }
    }

  next_peer:
    peer = MPLS_LIST_NEXT(&g->session, peer, _global);
  }

LMp_33:
  LDP_EXIT(g->user_data, "ldp_label_mapping_process");
  return retval;

LMp_32:
  /* release the unusable mapping back to the sender */
  LDP_PRINT(g->user_data, "Receive_Label_Map_32: send release");
  if (ldp_label_release_send(g, s, ds_attr, LDP_NOTIF_NONE) !=
    MPLS_SUCCESS) {
    retval = MPLS_FAILURE;
  }

  LDP_EXIT(g->user_data, "ldp_label_mapping_process");
  return retval;
}
/*
 * Send a Label Mapping upstream on session "s" for "us_attr" (RFC 3036
 * A.1.5, "Send_Label", steps SL.1-13): allocate (or reuse) an in-label,
 * transmit the mapping, and on label-resource exhaustion answer every
 * pending received request with a No-Label-Resources notification.
 *
 * Returns MPLS_SUCCESS (including the resource-exhausted path) or
 * MPLS_FAILURE when the TCP send fails.
 *
 * NOTE(review): the label-reuse lookup is compiled out (#if 0), so
 * `existing` is always NULL and the "Using an existing label" branch is
 * currently unreachable. Also `us_attr->state = LDP_LSP_STATE_MAP_SENT`
 * is assigned twice (before and after the send) — the first assignment
 * is redundant but harmless.
 */
mpls_return_enum ldp_label_mapping_send(ldp_global * g, ldp_session * s,
  ldp_fec *f, ldp_attr * us_attr, ldp_attr * ds_attr)
{
  ldp_inlabel *in = NULL;
  ldp_attr *us_temp, *existing = NULL;

  LDP_ENTER(g->user_data, "ldp_label_mapping_send");

  MPLS_ASSERT(us_attr);

#if 0
  /*
   * before we can enable this, inlabels need to keep track of all of
   * the attr that link to it. Then when running in DU independent mode we
   * can correctly attach the us and ds attrs involved when propogating a
   * new mapping for a FEC we've already distributed labels for
   */
  existing = ldp_attr_find_upstream_map_in_labelspace(f, s->cfg_label_space);
#endif

  if (existing) {
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_BINDING,
      "Using an existing label\n");
    in = existing->inlabel;
    ldp_attr_add_inlabel(us_attr, in);
  } else {
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_BINDING,
      "Generating a label\n");
    in = ldp_inlabel_create_complete(g, s, us_attr);
  }

  /* SL.1-3: could not get a label -> no-label-resources handling */
  if (!in) { /* SL.1-3 */
    goto Send_Label_9;
  }

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_BINDING,
    "In Label Added\n");

  us_attr->state = LDP_LSP_STATE_MAP_SENT;

  us_attr->msg_id = g->message_identifier;
  ldp_label_mapping_prepare_msg(s->tx_message, g->message_identifier++,
    us_attr);

  if (ldp_mesg_send_tcp(g, s, s->tx_message) != MPLS_SUCCESS) { /* SL.4 */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_ERROR,
      "Failed sending Label Mapping to %s\n", s->session_name);
    goto ldp_label_mapping_send_error;
  }

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_LABEL,
    "Label Mapping Sent to %s for %08x/%d\n", s->session_name,
    us_attr->fecTlv.fecElArray[0].addressEl.address,
    us_attr->fecTlv.fecElArray[0].addressEl.preLen);

  us_attr->state = LDP_LSP_STATE_MAP_SENT; /* SL.6,7 */

  LDP_EXIT(g->user_data, "ldp_label_mapping_send");
  return MPLS_SUCCESS; /* SL.8 */

Send_Label_9:
  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_STATE,
    "No Label Resources\n");

  /* SL.9-13: notify every peer request still pending for this FEC */
  while ((us_temp = ldp_attr_find_upstream_state2(g, s, us_attr->fec,
    LDP_LSP_STATE_REQ_RECV)) != NULL) { /* SL.9 */
    ldp_notif_send(g, s, us_temp,
      LDP_NOTIF_NO_LABEL_RESOURCES_AVAILABLE); /* SL.10 */
    s->no_label_resource_sent = MPLS_BOOL_TRUE; /* SL.12 */
    us_temp->state = LDP_LSP_STATE_NO_LABEL_RESOURCE_SENT; /* SL.13 */
  }

  LDP_EXIT(g->user_data, "ldp_label_mapping_send");
  return MPLS_SUCCESS;

ldp_label_mapping_send_error:
  LDP_EXIT(g->user_data, "ldp_label_mapping_send-error");
  return MPLS_FAILURE;
}
/*
 * Timer callback fired shortly after a session reaches OPERATIONAL: walk
 * every FEC/nexthop in the global tables and send the initial label
 * distribution to the new session 's' (extra).  Runs under the global lock.
 *
 * FIX(review): the original wrote s->initial_distribution_timer = 0 AFTER
 * MPLS_REFCNT_RELEASE(s, ldp_session_delete).  If that release dropped the
 * last reference the session was deleted and the store was a use-after-free.
 * The field is now cleared before the reference is released.
 */
void ldp_label_mapping_initial_callback(mpls_timer_handle timer, void *extra,
  mpls_cfg_handle handle)
{
  ldp_session *s = (ldp_session *) extra;
  ldp_global *g = (ldp_global*)handle;
  ldp_attr *ds_attr = NULL;
  ldp_attr *us_attr = NULL;
  ldp_session *nh_session = NULL;
  mpls_bool done = MPLS_BOOL_FALSE;
  ldp_fec *f;
  ldp_nexthop *nh;

  LDP_ENTER(g->user_data, "ldp_label_mapping_initial_callback");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_TIMER,
    "Initial Label Mapping fired: session(%d)\n", s->index);

  mpls_lock_get(g->global_lock);
  mpls_timer_stop(g->timer_handle, timer);

  /* iterate every FEC, and every nexthop of that FEC */
  f = MPLS_LIST_HEAD(&g->fec);
  while (f) {
    nh = MPLS_LIST_HEAD(&f->nh_root);
    while (nh) {
      switch (f->info.type) {
        case MPLS_FEC_PREFIX:
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_ROUTE, "Processing prefix FEC: %08x/%d ",
            f->info.u.prefix.network.u.ipv4, f->info.u.prefix.length);
          break;
        case MPLS_FEC_HOST:
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_ROUTE, "Processing host FEC: %08x ",
            f->info.u.host.u.ipv4);
          break;
        case MPLS_FEC_L2CC:
          /* FIX(review): trace string typo "Processingu" corrected */
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_ROUTE, "Processing L2CC FEC: %d %d %d ",
            f->info.u.l2cc.connection_id, f->info.u.l2cc.group_id,
            f->info.u.l2cc.type);
          break;
        default:
          MPLS_ASSERT(0);
      }

      if (nh->info.type & MPLS_NH_IP) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ROUTE, "via %08x\n", nh->addr->address.u.ipv4);
      }
      if (nh->info.type & MPLS_NH_IF && nh->iff) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ROUTE, "via %p\n", nh->iff->handle);
      }

      /* are we allowed to export this route from the rib */
      if (mpls_policy_export_check(g->user_data, &f->info, &nh->info) ==
          MPLS_BOOL_FALSE) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_POLICY, "Rejected by export policy\n");
        goto ldp_label_mapping_initial_callback_end_nh;
      }

      /* have we already sent a mapping for this fec to the new session? */
      if ((us_attr = ldp_attr_find_upstream_state2(g, s, f,
          LDP_LSP_STATE_MAP_SENT))) {
        /* no need to sent another mapping */
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
          LDP_TRACE_FLAG_ROUTE, "Already sent this FEC to session %d\n",
          s->index);
        goto ldp_label_mapping_initial_callback_end_nh;
      }

      if (!(nh_session = ldp_get_next_hop_session_for_fec2(f,nh))) {
        /* no downstream session for this nexthop: we may be egress */
        ds_attr = NULL;
      } else {
        if (nh_session->index == s->index) {
          /* don't advertise a mapping back to its own nexthop session */
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL,
            LDP_TRACE_FLAG_ROUTE, "Nexthop session(%d) == session(%d)\n",
            nh_session->index, s->index);
          goto ldp_label_mapping_initial_callback_end_nh;
        }
        ds_attr = ldp_attr_find_downstream_state2(g, nh_session, f,
          LDP_LSP_STATE_MAP_RECV);
      }

      if ((g->label_merge != MPLS_BOOL_TRUE) && ldp_attr_num_us2ds(ds_attr)) {
        /* we have a ds label, but can't use it */
        ds_attr = NULL;
      }

      us_attr = NULL;
      if (ds_attr) {
        /* we can use it, merge on baby */
        ldp_label_mapping_with_xc(g, s, f, &us_attr, ds_attr);
      } else {
        /* we don't have a ds label */
        /* we will be egress? */
        if (g->lsp_control_mode == LDP_CONTROL_ORDERED) {
          if (mpls_policy_egress_check(g->user_data, &f->info, &nh->info) ==
              MPLS_BOOL_TRUE) {
            ldp_label_mapping_with_xc(g, s, f, &us_attr, NULL);
          }
        } else {
          ldp_label_mapping_with_xc(g, s, f, &us_attr, NULL);
        }
      }

ldp_label_mapping_initial_callback_end_nh:
      nh = MPLS_LIST_NEXT(&f->nh_root, nh, _fec);
    }
    f = MPLS_LIST_NEXT(&g->fec, f, _global);
  }

  done = MPLS_BOOL_TRUE;
  if (done == MPLS_BOOL_TRUE) {
    mpls_timer_delete(g->timer_handle, timer);
    /* clear the back-pointer BEFORE releasing the reference: the release
     * may delete the session */
    s->initial_distribution_timer = (mpls_timer_handle) 0;
    MPLS_REFCNT_RELEASE(s, ldp_session_delete);
  } else {
    mpls_timer_start(g->timer_handle, timer, MPLS_TIMER_ONESHOT);
    /* need to mark the session with where it left off */
  }

  mpls_lock_release(g->global_lock);

  LDP_EXIT(g->user_data, "ldp_label_mapping_initial_callback");
}
/*
 * Send a label mapping for FEC 'f' to upstream session 's' and, when a
 * downstream mapping 'ds_attr' is supplied and installed, cross connect
 * the upstream inlabel to the downstream outlabel.
 *
 * *us_attr may be NULL on entry; in that case an attr is created here and
 * inserted into the upstream tree only after a successful send (and deleted
 * again on any failure).  On return *us_attr points at the attr used.
 *
 * Returns MPLS_SUCCESS, or the failing step's error code; on failure a
 * locally-created *us_attr has been deleted (caller must not reuse it).
 */
mpls_return_enum ldp_label_mapping_with_xc(ldp_global * g, ldp_session * s,
  ldp_fec * f, ldp_attr ** us_attr, ldp_attr * ds_attr)
{
  mpls_return_enum result = MPLS_SUCCESS;
  mpls_bool propogating = MPLS_BOOL_TRUE;
  mpls_bool egress = MPLS_BOOL_TRUE;
  /* remember whether we created *us_attr, so failures can roll it back */
  mpls_bool created = MPLS_BOOL_FALSE;

  MPLS_ASSERT(us_attr);

  if (!(*us_attr)) {
    if (!((*us_attr) = ldp_attr_create(&f->info))) {
      return MPLS_FAILURE;
    }
    created = MPLS_BOOL_TRUE;
  }

  /* no downstream mapping means we are not propagating a received label;
   * we act as egress for this FEC */
  if (!ds_attr) {
    propogating = MPLS_BOOL_FALSE;
    egress = MPLS_BOOL_TRUE;
  }

  /* NOTE(review): egress stays MPLS_BOOL_TRUE on both paths here —
   * presumably Prepare_Label_Mapping_Attributes only honours it when
   * propogating is FALSE; confirm against its definition */
  Prepare_Label_Mapping_Attributes(g, s, &f->info, ds_attr, (*us_attr),
    propogating, MPLS_BOOL_TRUE, egress);

  result = ldp_label_mapping_send(g, s, f, (*us_attr), ds_attr);
  if (result != MPLS_SUCCESS) {
    /* roll back the attr we created above; a caller-supplied attr is the
     * caller's to clean up */
    if (created == MPLS_BOOL_TRUE) {
      ldp_attr_delete(*us_attr);
    }
    return result;
  }

  /* only a freshly created attr needs inserting into the upstream tree */
  if (created == MPLS_BOOL_TRUE) {
    result = ldp_attr_insert_upstream2(g, s, (*us_attr), f);
    if (result != MPLS_SUCCESS) {
      ldp_attr_delete(*us_attr);
      return result;
    }
  }

  /*
   * If we have a downstream mapping (not neccessarily installed) and
   * the downstream and upstream session are not the same....
   */
  if (ds_attr && ((*us_attr)->session->index != ds_attr->session->index)) {
    /* then link the attra */
    ldp_attr_add_us2ds((*us_attr), ds_attr);

    /* if we just created the upstream, and we have install the
     * downstream, then cross connect them */
    if ((created == MPLS_BOOL_TRUE) && ds_attr->outlabel) {
      if ((*us_attr)->inlabel->outlabel) {
        /*
         * if we use an existing upstream mapping (in ldp_label_mapping_send())
         * the inlabel will already be be connected to an outlabel;
         */
        MPLS_ASSERT((*us_attr)->inlabel->outlabel == ds_attr->outlabel);
      } else {
        /* NOTE(review): the trace assumes a prefix FEC; for host/L2CC FECs
         * the prefix fields printed here are not meaningful */
        LDP_TRACE_LOG(g->user_data,MPLS_TRACE_STATE_ALL,LDP_TRACE_FLAG_BINDING,
          "Cross Connect Added for %08x/%d from %s -> %s\n",
          f->info.u.prefix.network.u.ipv4, f->info.u.prefix.length,
          (*us_attr)->session->session_name, ds_attr->session->session_name);
        result = ldp_inlabel_add_outlabel(g,(*us_attr)->inlabel,
          ds_attr->outlabel);
        if (result != MPLS_SUCCESS) {
          return result;
        }
      }
    }
  }
  return MPLS_SUCCESS;
}
/*
 * Process a received Label Withdraw (RFC 3036 "LWd" procedure; the LWd.x
 * markers refer to its steps).  Finds the downstream mapping matching the
 * withdrawn label, removes it, releases the label back to the peer, and —
 * in ordered control mode — propagates the withdraw to every upstream
 * linked to that downstream.
 *
 * Returns MPLS_SUCCESS, MPLS_FAILURE (withdraw for an unknown mapping),
 * or MPLS_FATAL (a send to a peer failed).
 */
mpls_return_enum ldp_label_withdraw_process(ldp_global * g, ldp_session * s,
  ldp_adj * a, ldp_entity * e, ldp_attr * r_attr, ldp_fec * f)
{
  mpls_bool label_exists = MPLS_BOOL_FALSE;
  ldp_attr_list *ds_list = NULL;
  ldp_attr *ds_attr = NULL;     /* the downstream mapping being withdrawn */
  ldp_attr *ds_temp = NULL;
  ldp_attr *us_temp = NULL;
  ldp_nexthop *nh = NULL;
  mpls_return_enum retval = MPLS_SUCCESS;

  LDP_ENTER(g->user_data, "ldp_label_withdraw_process");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
    "Withdraw Recv for %s\n", s->session_name);

  /* a withdraw must carry a label TLV (generic, ATM, or frame-relay) */
  if (r_attr->genLblTlvExists || r_attr->atmLblTlvExists ||
      r_attr->frLblTlvExists) {
    label_exists = MPLS_BOOL_TRUE;
  } else {
    MPLS_ASSERT(0);
  }

  if (f) {
    /* search this session's downstream mappings for the FEC, looking for
     * one in MAP_RECV state whose label matches the withdrawn one */
    if ((ds_list = ldp_attr_find_downstream_all2(g, s, f)) != NULL) {
      ds_temp = MPLS_LIST_HEAD(ds_list);
      while (ds_temp) {
        if (ds_temp->state == LDP_LSP_STATE_MAP_RECV) { /* LWd.3 */
          if (ldp_attr_is_equal(r_attr, ds_temp, LDP_ATTR_LABEL)) {
            ds_attr = ds_temp;
            break;
          }
        }
        ds_temp = MPLS_LIST_NEXT(ds_list, ds_temp, _fs);
      }
    }

    if (!ds_attr) {
      retval = MPLS_FAILURE;
      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
        "Withdraw Recv for a non-existant mapping from %s\n",s->session_name);
      goto LWd_13;
    }

    /*
     * we want to remove it from the tree, but not delete it yet
     * so hold a refcnt, we will release that refcnt at the end, thus
     * deleting it if no one else it holding a refcnt
     */
    MPLS_REFCNT_HOLD(ds_attr);
    ldp_attr_remove_complete(g, ds_attr, MPLS_BOOL_FALSE); /* LWd.4 */

    /* LWd.2 */
    if (ldp_label_release_send(g, s, ds_attr, LDP_NOTIF_NONE) !=
        MPLS_SUCCESS) {
      retval = MPLS_FATAL;
      goto LWd_13;
    }

    if (g->lsp_control_mode == LDP_CONTROL_ORDERED) { /* LWd.5 */
      goto LWd_8;
    }

    if (s->oper_distribution_mode != LDP_DISTRIBUTION_ONDEMAND) { /* LWd.6 */
      goto LWd_13;
    }

    /* downstream-on-demand: re-request a label along the nexthop */
    MPLS_ASSERT((nh = ldp_nexthop_for_fec_session(f, s)));
    retval = ldp_fec_process_add(g, f, nh, s); /* LWd.7 */
    goto LWd_13;

  LWd_8:
    /* I can only propogate a label withdraw to the upstreams attached
       to the downstream found above */
    us_temp = MPLS_LIST_HEAD(&ds_attr->us_attr_root);
    while (us_temp) {
      if (us_temp->state == LDP_LSP_STATE_MAP_SENT) {
        if (ldp_label_withdraw_send(g, us_temp->session, us_temp,
            LDP_NOTIF_NONE) != MPLS_SUCCESS) { /* LWd.11 */
          retval = MPLS_FATAL;
          goto LWd_13;
        }
      }
      us_temp = MPLS_LIST_NEXT(&ds_attr->us_attr_root, us_temp, _ds_attr);
    }
  } else {
    /* JLEU: process wildcard FEC stuff here */
    MPLS_ASSERT(0);
  }

LWd_13:
  /* drop the refcnt held above; deletes ds_attr if no one else holds it */
  if (ds_attr) {
    MPLS_REFCNT_RELEASE2(g, ds_attr, ldp_attr_delete);
  }

  LDP_EXIT(g->user_data, "ldp_label_withdraw_process");
  return retval;
}
/*
 * Process a received Label Release (RFC 3036 "LRl" procedure; the LRl.x
 * markers refer to its steps).  Finds the upstream mapping the peer is
 * releasing, optionally propagates the release downstream (when label
 * merging is off and release propagation is configured), and removes the
 * released binding(s).
 *
 * Returns MPLS_SUCCESS, or MPLS_FAILURE if a downstream release send
 * failed.
 */
mpls_return_enum ldp_label_release_process(ldp_global * g, ldp_session * s,
  ldp_adj * a, ldp_entity * e, ldp_attr * r_attr, ldp_fec * f)
{
  /* set when the release carries a label TLV; NOTE(review): not consulted
   * after being set in this function */
  mpls_bool label_exists = MPLS_BOOL_FALSE;
  ldp_attr *us_attr = NULL;     /* the upstream mapping being released */
  ldp_attr *ds_attr = NULL;     /* downstream mapping to propagate to */
  mpls_return_enum retval = MPLS_SUCCESS;

  LDP_ENTER(g->user_data, "ldp_label_release_process");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
    "Release Recv from %s\n", s->session_name);

  if (r_attr->genLblTlvExists || r_attr->atmLblTlvExists ||
      r_attr->frLblTlvExists) {
    label_exists = MPLS_BOOL_TRUE;
  }

  if (f) {
    /* LRl.1 is accomplished at LRl.10 */

    /* the released mapping must be one we sent (MAP_SENT) or one we
     * already withdrew and are awaiting the release for (WITH_SENT) */
    us_attr = ldp_attr_find_upstream_state2(g, s, f, LDP_LSP_STATE_MAP_SENT);
    if (!us_attr) {
      us_attr = ldp_attr_find_upstream_state2(g, s, f,
        LDP_LSP_STATE_WITH_SENT);
      if (!us_attr) { /* LRl.2 */
        goto LRl_13;
      }
      /* LRl.3 is accomplished at LRl.10 */
    }

    if (g->label_merge == MPLS_BOOL_FALSE) { /* LR1.4 */
      goto LRl_6;
    }

    /* LR1.5: with merging, keep the downstream label while any other
     * upstream still has a mapping outstanding for this FEC */
    if (ldp_attr_find_upstream_state_any2(g, f, LDP_LSP_STATE_MAP_SENT)) {
      goto LRl_10;
    }

  LRl_6:
    /* we can only propogate a release to the downstream attached to the
       upstream we found up top */
    /* LRl.6,7 */
    if (us_attr->ds_attr && us_attr->ds_attr->state ==
        LDP_LSP_STATE_MAP_RECV) {
      ds_attr = us_attr->ds_attr;
    } else {
      goto LRl_10;
    }

    if (g->propagate_release == MPLS_BOOL_FALSE) { /* LRl.8 */
      goto LRl_10;
    }

    if (ldp_label_release_send(g, ds_attr->session, ds_attr,
        LDP_NOTIF_NONE) != MPLS_SUCCESS) { /* LRl.9 */
      retval = MPLS_FAILURE;
    }
    /* downstream binding released: remove it too */
    ldp_attr_remove_complete(g, ds_attr, MPLS_BOOL_FALSE);

  LRl_10:
    ldp_attr_remove_complete(g, us_attr, MPLS_BOOL_FALSE); /* LRl.10,11 */
  } else {
    LDP_PRINT(g->user_data, "No FEC in release, need to implement\n");
    MPLS_ASSERT(0);
  }

LRl_13:
  LDP_EXIT(g->user_data, "ldp_label_release_process");
  return retval;
}