Example #1
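/*
 * Encode 'msg' into the entity's transmit buffer and send it to the
 * entity's destination address via the shared hello UDP socket.  For
 * LDP_DIRECT entities the multicast transmit interface is selected first
 * (and cleared again after the send); LDP_INDIRECT entities send straight
 * to the peer.  Returns MPLS_SUCCESS on success, MPLS_FAILURE on encode
 * or socket errors.
 */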
mpls_return_enum ldp_mesg_send_udp(ldp_global * g, ldp_entity * e,
                                   ldp_mesg * msg)
{
    ldp_buf *buf = NULL;
    mpls_dest *dest = NULL;
    int32_t result = 0;
    uint16_t label_space = 0;

    MPLS_ASSERT(e);

    switch (e->entity_type) {
    case LDP_DIRECT:
        MPLS_ASSERT(e->p.iff != NULL);
        if (mpls_socket_multicast_if_tx(g->socket_handle, g->hello_socket,
                                        e->p.iff) == MPLS_FAILURE) {
            LDP_PRINT(g->user_data, "ldp_mesg_send_udp: multicast tx error(%d)\n",
                      mpls_socket_get_errno(g->socket_handle, g->hello_socket));
            return MPLS_FAILURE;
        }
        dest = &e->p.iff->dest;
        buf = e->p.iff->tx_buffer;
        label_space = e->p.iff->label_space;
        break;
    case LDP_INDIRECT:
        MPLS_ASSERT(e->p.peer != NULL);
        dest = &e->p.peer->dest;
        buf = e->p.peer->tx_buffer;
        label_space = e->p.peer->label_space;
        break;
    default:
        MPLS_ASSERT(0);
    }
    result =
        ldp_encode_one_mesg(g, g->lsr_identifier.u.ipv4, label_space, buf, msg);

    if (result <= 0)
        return MPLS_FAILURE;

    e->mesg_tx++;

    result = mpls_socket_udp_sendto(g->socket_handle, g->hello_socket,
                                    buf->buffer, buf->size, dest);

    switch (e->entity_type) {
    case LDP_DIRECT:
        mpls_socket_multicast_if_tx(g->socket_handle, g->hello_socket, NULL);
        break;
    case LDP_INDIRECT:
        break;
    default:
        MPLS_ASSERT(0);
    }

    if (result <= 0) {
        LDP_PRINT(g->user_data, "sendto failed(%d)\n", result);
        perror("sendto");
        return MPLS_FAILURE;
    }
    return MPLS_SUCCESS;
}
Example #2
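/*
 * Encode 'msg' into the session's transmit buffer and write it on the
 * session's TCP socket.  Returns MPLS_SUCCESS on success, MPLS_FAILURE if
 * encoding or the write fails.
 */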
mpls_return_enum ldp_mesg_send_tcp(ldp_global * g, ldp_session * s,
                                   ldp_mesg * msg)
{
    int32_t result = 0;

    MPLS_ASSERT(s);

    result = ldp_encode_one_mesg(g, g->lsr_identifier.u.ipv4,
                                 s->cfg_label_space, s->tx_buffer, msg);

    if (result <= 0)
        return MPLS_FAILURE;

    s->mesg_tx++;

    result = mpls_socket_tcp_write(g->socket_handle, s->socket,
                                   s->tx_buffer->buffer, s->tx_buffer->size);

    if (result <= 0) {
        LDP_PRINT(g->user_data, "send failed(%d)\n", result);
        perror("send");
        return MPLS_FAILURE;
    }
    return MPLS_SUCCESS;
}
Example #3
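/*
 * Free an attribute object.  The caller must have dropped the last
 * reference and removed the attribute from the tree; both conditions are
 * asserted before the attribute is unlinked from the global list and freed.
 */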
void ldp_attr_delete(ldp_global *g, ldp_attr * a)
{
  LDP_PRINT(g->user_data, "attr delete: %p", a);
  MPLS_REFCNT_ASSERT(a, 0);
  MPLS_ASSERT(a->in_tree == MPLS_BOOL_FALSE);
  _ldp_global_del_attr(g, a);
  mpls_free(a);
}
Example #4
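/*
 * Loop detection on received attributes (procedure steps CRa.1 - CRa.8):
 * reject a message whose Hop Count TLV reaches the configured hop count
 * limit, or whose Path Vector TLV contains our own LSR ID or exceeds the
 * session's path vector limit.  Returns MPLS_SUCCESS when no loop is
 * detected; otherwise sends a Loop Detected notification (except for
 * Label Mapping messages) and returns MPLS_FAILURE.
 */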
mpls_return_enum Check_Received_Attributes(ldp_global * g, ldp_session * s,
  ldp_attr * r_attr, uint16_t type)
{
  int count = 0;
  int i;

  if (!r_attr->hopCountTlvExists) { /* CRa.1 */
    goto Check_Received_Attributes_5;
  }

  if (r_attr->hopCountTlv.hcValue >= s->cfg_hop_count_limit) { /* CRa.2 */
    LDP_PRINT(g->user_data, "CRa.2\n");
    goto Check_Received_Attributes_6;
  }

  if (!r_attr->pathVecTlvExists) { /* CRa.3 */
    goto Check_Received_Attributes_5;
  }

  for (i = 0; i < MPLS_MAXHOPSNUMBER; i++) { /* CRa.4 */
    if (r_attr->pathVecTlv.lsrId[i]) {
      count++;
      if (r_attr->pathVecTlv.lsrId[i] == g->lsr_identifier.u.ipv4) {
        LDP_PRINT(g->user_data, "CRa.4a\n");
        goto Check_Received_Attributes_6;
      }
      if (count > s->oper_path_vector_limit) {
        LDP_PRINT(g->user_data, "CRa.4b\n");
        goto Check_Received_Attributes_6;
      }
    }
  }

Check_Received_Attributes_5:
  return MPLS_SUCCESS;

Check_Received_Attributes_6:
  if (type != MPLS_LBLMAP_MSGTYPE) {
    ldp_notif_send(g, s, r_attr, LDP_NOTIF_LOOP_DETECTED); /* CRa.7 */
  }
  return MPLS_FAILURE;           /* CRa.8 */
}
Example #5
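/*
 * Extract the hold time from a received Hello message.  Fails if the
 * Common Hello Parameters TLV is not present.
 */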
mpls_return_enum ldp_mesg_hello_get_hellotime(ldp_mesg * msg, int *hellotime)
{
    MPLS_MSGPTR(Hello);
    MPLS_ASSERT(msg && hellotime);

    MPLS_MSGPARAM(Hello) = &msg->u.hello;
    if (!MPLS_MSGPARAM(Hello)->chpTlvExists) {
        LDP_PRINT(NULL, "No chp!");
        return MPLS_FAILURE;
    }

    *hellotime = MPLS_MSGPARAM(Hello)->chp.holdTime;

    return MPLS_SUCCESS;
}
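
/*
 * A minimal caller sketch, not taken from the original sources; it only
 * illustrates the MPLS_SUCCESS/MPLS_FAILURE contract of the accessor above.
 * The helper name and its default-value parameter are hypothetical.
 */
static int hello_holdtime_or_default(ldp_mesg * msg, int def)
{
    int holdtime = def;

    if (ldp_mesg_hello_get_hellotime(msg, &holdtime) != MPLS_SUCCESS) {
        return def;               /* message carried no chp TLV */
    }
    return holdtime;
}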
Example #6
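/*
 * Insert a FEC into the global FEC tree (g->fec_tree), keyed by the IPv4
 * prefix or host address and a prefix length.  L2CC FECs are not keyed by
 * address and are skipped here; a tree insertion failure is reported as
 * MPLS_FATAL.
 */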
static mpls_return_enum ldp_fec_insert(ldp_global *g, ldp_fec * fec)
{
  mpls_return_enum retval = MPLS_SUCCESS;
  uint32_t key = 0;   /* defaulted: the MPLS_PW_ID_FEC test path sets only len */
  uint8_t len = 0;

  MPLS_ASSERT(g && fec);
  LDP_ENTER(g->user_data, "ldp_fec_insert");

  switch(fec->info.type) {
    case MPLS_FEC_PREFIX:
      key = fec->info.u.prefix.network.u.ipv4;
      len = fec->info.u.prefix.length;
      break;
    case MPLS_FEC_HOST:
      key = fec->info.u.host.u.ipv4;
      len = 32;
      break;
    case MPLS_FEC_L2CC:
      /* they had better insert it into the global list */
      LDP_EXIT(g->user_data, "ldp_fec_insert: l2cc");
      return MPLS_SUCCESS;
    case MPLS_PW_ID_FEC: /* testing: key is left at its default for now */
      len = 32;
      break;
    default:
      MPLS_ASSERT(0);
  }

  if (mpls_tree_insert(g->fec_tree, key, len, (void *)fec) != MPLS_SUCCESS) {
    LDP_PRINT(g->user_data, "ldp_fec_insert: error adding fec\n");
    retval = MPLS_FATAL;
  }

  LDP_EXIT(g->user_data, "ldp_fec_insert");
  return retval;
}
Example #7
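/* Free an adjacency once its reference count has dropped to zero. */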
void ldp_adj_delete(ldp_adj * a)
{
    LDP_PRINT(NULL, "adj delete %p", a);
    MPLS_REFCNT_ASSERT(a, 0);
    mpls_free(a);
}
Example #8
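/*
 * Send a Label Request for the FEC carried in *ds_attr (procedure steps
 * SLRq.1 - SLRq.7).  If a request for this FEC is already pending on the
 * session, the pending attribute is reused and *ds_attr is updated to
 * point at it.  If the peer has signalled that it is out of label
 * resources, or the send or insert fails, the attribute is recorded in
 * state LDP_LSP_STATE_NO_LABEL_RESOURCE_SENT and MPLS_FAILURE is returned.
 */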
mpls_return_enum ldp_label_request_send(ldp_global * g, ldp_session * s,
  ldp_attr * us_attr, ldp_attr ** ds_attr)
{
  ldp_attr *ds_temp;
  mpls_fec fec;

  LDP_ENTER(g->user_data, "ldp_label_request_send");
  MPLS_ASSERT(ds_attr && *ds_attr);

  fec_tlv2mpls_fec(&((*ds_attr)->fecTlv), 0, &fec);

  if ((ds_temp = ldp_attr_find_downstream_state(g, s, &fec,
        LDP_LSP_STATE_REQ_SENT)) != NULL) { /* SLRq.1 */

    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_LABEL,
      "Label Request Send: request already pending(%d)\n", ds_temp->index);

    ldp_attr_add_us2ds(us_attr, ds_temp);

    /* we do not need the one passed in, but make sure that the caller
       is using this one from here forth */
    ldp_attr_remove_complete(g, *ds_attr, MPLS_BOOL_TRUE);
    *ds_attr = ds_temp;
    return MPLS_SUCCESS;
  }

  if (s->no_label_resource_recv == MPLS_BOOL_TRUE) { /* SLRq.2 */
    goto ldp_label_request_send_error;
  }

  (*ds_attr)->msg_id = g->message_identifier++;
  ldp_label_request_prepare_msg(s->tx_message, (*ds_attr)->msg_id, *ds_attr);

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_LABEL,
    "Label Request Sent: session(%d)\n", s->index);

  if (ldp_mesg_send_tcp(g, s, s->tx_message) == MPLS_FAILURE) { /* SLRq.3 */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_ERROR,
      "Label Request send failed\n");
    goto ldp_label_request_send_error;
  }

  (*ds_attr)->state = LDP_LSP_STATE_REQ_SENT;
  if (ldp_attr_insert_downstream(g, s, (*ds_attr)) == MPLS_FAILURE) { /* SLRq.4 */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_SEND, LDP_TRACE_FLAG_ERROR,
      "Couldn't insert sent attributes in tree\n");
    goto ldp_label_request_send_error;
  }
  if (us_attr) {
    ldp_attr_add_us2ds(us_attr, *ds_attr);
  }

  LDP_EXIT(g->user_data, "ldp_label_request_send");

  return MPLS_SUCCESS;           /* SLRq.5 */

ldp_label_request_send_error:

  LDP_PRINT(g->user_data, "SLRq.6\n");
  (*ds_attr)->state = LDP_LSP_STATE_NO_LABEL_RESOURCE_SENT;
  ldp_attr_insert_downstream(g, s, (*ds_attr)); /* SLRq.6 */

  LDP_EXIT(g->user_data, "ldp_label_request_send-error");

  return MPLS_FAILURE;           /* SLRq.7 */
}
Example #9
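/*
 * Top-level socket event dispatcher.  Under the global lock it feeds
 * UDP/TCP data, TCP listen/accept, TCP connect completion, and close
 * events into the LDP state machine, then maps the state machine's
 * result: MPLS_FAILURE triggers a session close, and MPLS_FATAL is
 * propagated back to the caller.
 */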
mpls_return_enum ldp_event(mpls_cfg_handle handle, mpls_socket_handle socket,
  void *extra, ldp_event_enum event)
{
  mpls_return_enum retval = MPLS_SUCCESS;
  ldp_global *g = (ldp_global*)handle;

  mpls_socket_handle socket_new = (mpls_socket_handle)0;
  ldp_session *session = NULL;
  ldp_entity *entity = NULL;
  ldp_adj *adj = NULL;

  uint8_t buffer[MPLS_PDUMAXLEN];
  mpls_dest from;
  ldp_mesg mesg;
  ldp_buf buf;

  LDP_ENTER(g->user_data, "ldp_event");

  mpls_lock_get(g->global_lock);

  switch (event) {
    case LDP_EVENT_TCP_DATA:
    case LDP_EVENT_UDP_DATA:
    {
      mpls_bool more;

      buf.current = buffer;
      buf.buffer = buffer;
      buf.total = MPLS_PDUMAXLEN;
      buf.size = 0;
      buf.current_size = 0;
      buf.want = 0;

      /* do this so a failure will know which session caused it */
      if (event == LDP_EVENT_TCP_DATA) {
        session = extra;
      }

      do {
        retval = ldp_buf_process(g, socket, &buf, extra, event, &from, &more);
      } while (retval == MPLS_SUCCESS && more == MPLS_BOOL_TRUE);
      break;
    }
    case LDP_EVENT_TCP_LISTEN:
    {
      socket_new = mpls_socket_tcp_accept(g->socket_handle, socket, &from);

      if (mpls_socket_handle_verify(g->socket_handle, socket_new) ==
        MPLS_BOOL_FALSE) {
        LDP_PRINT(g->user_data, "Failed accepting socket\n");
        retval = MPLS_FAILURE;
      } else if (!(session = ldp_session_create_passive(g, socket_new,
        &from))) {
        mpls_socket_close(g->socket_handle, socket_new);
        LDP_PRINT(g->user_data, "Failure creating passive session\n");
        retval = MPLS_FATAL;
      } else {
        retval = ldp_state_machine(g, session, NULL, NULL,
          LDP_EVENT_CONNECT, &mesg, &from);
      }
      break;
    }
    case LDP_EVENT_TCP_CONNECT:
    {
      retval = mpls_socket_connect_status(g->socket_handle, socket);
      session = (ldp_session *)extra;

      if (retval == MPLS_SUCCESS) {
        /* only get this case if we did a non-block connect */
        mpls_socket_writelist_del(g->socket_handle, socket);
        retval = ldp_state_machine(g, session, NULL, NULL,
          LDP_EVENT_CONNECT, &mesg, &from);
      } else if (retval != MPLS_NON_BLOCKING) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_ERROR,
          "ldp_event: LDP_EVENT_TCP_CONNECT errno = %d\n",
          mpls_socket_get_errno(g->socket_handle, socket));
      } else {
        /* non-blocking connect is still blocking, we'll try again in a bit */
        retval = MPLS_SUCCESS;
      }
      break;
    }
    case LDP_EVENT_CLOSE:
    {
      retval = ldp_state_machine(g, session, adj, entity,
        LDP_EVENT_CLOSE, &mesg, &from);
      break;
    }
    default:
    {
      MPLS_ASSERT(0);
    }
  }

  /* ldp_state_machine returns MPLS_SUCCESS when it has handled the event
     to completion. If the handling of the event results in the session
     needing to be shut down, MPLS_FAILURE is returned. If the handling of
     the event requires that LDP be shut down, MPLS_FATAL is returned and
     passed back to the user. Other values are invalid. */

  switch (retval) {
    case MPLS_FAILURE:
    {
      /* if shutting down the session results in MPLS_FATAL, then pass it
       * back to the user */

      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_ERROR,
        "ldp_event: FAILURE executing a CLOSE\n");

      retval = ldp_state_machine(g, session, adj, entity, LDP_EVENT_CLOSE,
        NULL, &from);

      if (retval == MPLS_FATAL) {
        LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_ERROR,
          "ldp_event: CLOSE failed: FATAL propogated to the environemnt\n");
      }
      break;
    }
    case MPLS_FATAL:
    {
      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_ERROR,
        "ldp_event: FATAL propogated to the environemnt\n");
      break;
    }
    case MPLS_SUCCESS:
    {
      break;
    }
    default:
    {
      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_ERROR,
        "ldp_event: invalid return value of %d\n", retval);
      break;
    }
  }

  mpls_lock_release(g->global_lock);

  LDP_EXIT(g->user_data, "ldp_event");

  return retval;
}
Example #10
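/*
 * Receive Label Mapping procedure (steps LMp.1 - LMp.33): perform loop
 * detection, handle duplicate and updated mappings, apply the import
 * policy, install an outgoing label (and an FTN entry when acting as
 * ingress), and propagate mappings or label requests to peers according
 * to the distribution and control modes.
 */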
mpls_return_enum ldp_label_mapping_process(ldp_global * g, ldp_session * s,
  ldp_adj * a, ldp_entity * e, ldp_attr * r_attr, ldp_fec * f)
{
  mpls_return_enum retval = MPLS_SUCCESS;
  ldp_session *peer = NULL;
  ldp_attr_list *us_list = NULL;
  ldp_attr_list *ds_list = NULL;
  ldp_attr *ds_attr = NULL;
  ldp_attr *ds_temp = NULL;
  ldp_attr *us_attr = NULL;
  ldp_attr *us_temp = NULL;
  ldp_attr dumb_attr;
  ldp_nexthop *nh = NULL;

  ldp_outlabel *out = NULL;
  mpls_bool requested = MPLS_BOOL_FALSE;
  ldp_attr *existing = NULL;
  mpls_bool need_request = MPLS_BOOL_FALSE;

  LDP_ENTER(g->user_data, "ldp_label_mapping_process");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
    "Label Mapping Recv from %s for %08x/%d\n",
    s->session_name,
    r_attr->fecTlv.fecElArray[0].addressEl.address,
    r_attr->fecTlv.fecElArray[0].addressEl.preLen);

  if ((ds_attr = ldp_attr_find_downstream_state2(g, s, f,
        LDP_LSP_STATE_REQ_SENT)) != NULL) { /* LMp.1 */
    /* just remove the req from the tree, we will use the r_attr sent to us */
    ldp_attr_delete_downstream(g, s, ds_attr);
    requested = MPLS_BOOL_TRUE;
  } else {
    requested = MPLS_BOOL_FALSE;
  }

  ds_attr = r_attr;
  ds_attr->state = LDP_LSP_STATE_MAP_RECV; /* LMp.2 */

  /*
   * ds_attr is the mapping we will keep and is NOT in the tree, unless
   * it is an update mapping ...
   */
  if (Check_Received_Attributes(g, s, ds_attr, MPLS_LBLMAP_MSGTYPE) ==
    MPLS_SUCCESS) { /* LMp.3 */
    goto LMp_9;
  }

  /*
   * A loop was detected
   */
  if ((ds_list = ldp_attr_find_downstream_all2(g, s, f))) {
    ds_temp = MPLS_LIST_HEAD(ds_list);
    /*
     * check all the labels this session has received from "s" for "fec"
     * do we have a duplicate?
     */
    while (ds_temp) {
      if ((ds_temp->state == LDP_LSP_STATE_MAP_RECV) && /* LMp.4 */
        ldp_attr_is_equal(ds_temp, ds_attr, LDP_ATTR_LABEL) == /* LMp.5 */
        MPLS_BOOL_TRUE) {
        /* remove record of the label and remove it switching */
        ldp_attr_remove_complete(g, ds_temp, MPLS_BOOL_TRUE); /* LMp.6,7 */
        /*
         * I think this is supposed to be 32 NOT 33, we need to release
         * it don't we?
         */
        goto LMp_33;
      }
      ds_temp = MPLS_LIST_NEXT(ds_list, ds_temp, _fs);
    }
  }

  LDP_PRINT(g->user_data, "Receive_Label_Map_8: send release");
  if (ldp_label_release_send(g, s, ds_attr, LDP_NOTIF_LOOP_DETECTED) !=
    MPLS_SUCCESS) { /* LMp.8 */
    retval = MPLS_FAILURE;
  }
  goto LMp_33;

LMp_9:
  /*
   * No Loop Detected
   */
  ds_temp = ldp_attr_find_downstream_state2(g, s, f, LDP_LSP_STATE_MAP_RECV);
  if (requested == MPLS_BOOL_TRUE ||
      g->label_merge == MPLS_BOOL_FALSE || !ds_temp) {
    /* !merging then this is always a new LSP
     * merging w/o a recv'd mapping is a new LSP
     * this check comes from Note 6
     */
    goto LMp_11;
  }

  /* searching all recv'd attrs for matched mappings,
   * stop after finding 1st match
   */
  if ((ds_list = ldp_attr_find_downstream_all2(g, s, f))) {
    ds_temp = MPLS_LIST_HEAD(ds_list);
    while (ds_temp) {
      if (ds_temp->state == LDP_LSP_STATE_MAP_RECV) { /* LMp.9 */
        if (ldp_attr_is_equal(ds_attr, ds_temp, LDP_ATTR_LABEL) ==
          MPLS_BOOL_TRUE) { /* LMp.10 */
          /*
           * this mapping matches an existing mapping, but it
           * could contain updated attributes
           */
          existing = ds_temp;
          break;
        } else {
          /*
           * we have been given another label for the same FEC and we
           * didn't request it, release it
           */
          LDP_PRINT(g->user_data, "LMp.10 dup without req\n");
          goto LMp_32;
        }
      }
      ds_temp = MPLS_LIST_NEXT(ds_list, ds_temp, _fs);
    }
  }
  if (existing) {
    ldp_attr2ldp_attr(ds_attr, existing, LDP_ATTR_HOPCOUNT | LDP_ATTR_PATH |
      LDP_ATTR_MSGID | LDP_ATTR_LSPID | LDP_ATTR_TRAFFIC);
    ds_attr = existing;
    /*
     * no need to free ds_attr, since it was not added to the tree it
     * will be deleted when we exit ldp_label_mapping_process(), see
     * ldp_state_process().
     */
  }
  /*
   * from this point on.... if this is an updated mapping then ds_attr
   * is the existing mapping which has now been update, else ds_attr
   * is the new mapping
   */

LMp_11:
  /*
   * existing ONLY has a value for updated label mapping
   */
  nh = ldp_nexthop_for_fec_session(f,s);			 /* LMp.11 */

  /*
   * the following departs from the procedure, it allows for filtering
   * of label mappings
   *
   * Are we configured to accept and INSTALL this mapping?
   */
  /* note: nh may be NULL here (no nexthop via this session); skip the
   * import filter in that case and fall through to the check at LMp.12 */
  if (nh && mpls_policy_import_check(g->user_data, &f->info, &nh->info) ==
    MPLS_BOOL_FALSE) {
    /*
     * policy has rejected it, store it away
     */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
      "Label Mapping for %08x/%d from %s filtered by import policy\n",
      r_attr->fecTlv.fecElArray[0].addressEl.address,
      r_attr->fecTlv.fecElArray[0].addressEl.preLen, s->session_name);

    if (existing) {
      ds_attr->filtered = MPLS_BOOL_TRUE;
      if (ds_attr->outlabel && ds_attr->outlabel->switching == MPLS_BOOL_TRUE) {
        /* the mapping has been filtered, but the original wasn't? */
        MPLS_ASSERT(0);
      }
    } else {
      ds_attr->filtered = MPLS_BOOL_TRUE;
      if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) {
        retval = MPLS_FAILURE;
      }
    } 
    goto LMp_33;
  }

  if (!nh) {							 /* LMp.12 */
    /*
     * if we did not find a nh hop for this FEC that corresponded to the
     * MsgSource then the MsgSource is not a nexthop for the FEC
     */
    if (g->label_retention_mode == LDP_RETENTION_CONSERVATIVE) { /* LMp.13C */
      LDP_PRINT(g->user_data, "LMp.13C conservative\n");
      goto LMp_32;
    }

    /*
     * store it away
     */
    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
      "Session %s is not a valid nexthop for %08x/%d\n", s->session_name,
      r_attr->fecTlv.fecElArray[0].addressEl.address,
      r_attr->fecTlv.fecElArray[0].addressEl.preLen);

    if (!existing) { /* LMp.13L */
      if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) {
        retval = MPLS_FAILURE;
      }
    }
    goto LMp_33;
  }

  /*
   * this is slightly different form the procedure, we can still be
   * transit for a FEC we are not configured to be ingress for.
   * Either way we only need to do the "install for fwd/switching"
   * only once.  We could arrive here multiple times due to updates,
   * only install it the first time
   */
  if ((!existing) || (!existing->outlabel)) {
    /*
     * we haven't installed it yet.
     * Either new (!existing), or a result of a "Detect FEC Nexthop Change"
     * and we had this mapping in our database (!existing->outlabel))
     */

    if (!(out = ldp_outlabel_create_complete(g, s, ds_attr, nh))) {
      LDP_PRINT(g->user_data, "LMp.15 failure creating outlabel\n");
      goto LMp_32;
    }

    LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_ALL, LDP_TRACE_FLAG_BINDING,
      "Out Label Added\n");
  }

  /*
   * are we configured to act as ingress for this FEC?
   */
  if (mpls_policy_ingress_check(g->user_data, &f->info, &nh->info) ==
    MPLS_BOOL_TRUE) { /* LMp.14 */
    /*
     * yep, bind the label to the FEC
     */
    if (ds_attr->ingress != MPLS_BOOL_TRUE) {
#if MPLS_USE_LSR
      lsr_ftn ftn;
      ftn.outsegment_index = ds_attr->outlabel->info.handle;
      memcpy(&ftn.fec, &f->info, sizeof(mpls_fec));
      lsr_cfg_ftn_set2(g->lsr_handle, &ftn, LSR_CFG_ADD|LSR_FTN_CFG_FEC|
        LSR_FTN_CFG_OUTSEGMENT);
#else
      mpls_mpls_fec2out_add(g->mpls_handle, &f->info, &ds_attr->outlabel->info);
#endif
      ds_attr->ingress = MPLS_BOOL_TRUE;
      ds_attr->outlabel->merge_count++;
      LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_BINDING,
        "Acting as ingress for %08x/%d from %s\n",
        r_attr->fecTlv.fecElArray[0].addressEl.address,
        r_attr->fecTlv.fecElArray[0].addressEl.preLen, s->session_name);
    }
  }

  /* create a set of attrs that we will fill and compare against.
   * if this mapping were to be propagated, these are the attrs it would have.
   * by comparing what we have sent in the past to these, we can figure out
   * if we need to send an updated mapping
   */
  memset(&dumb_attr, 0, sizeof(ldp_attr));
  mpls_fec2fec_tlv(&f->info, &dumb_attr.fecTlv, 0);
  dumb_attr.fecTlvExists = 1;
  dumb_attr.fecTlv.numberFecElements = 1;

  /*
   * by definition (we received a label mapping that will be used) this
   * LSR is _not_ the egress, so calculate a hop and path based on the
   * mapping we received.  We will compare this with mapping that have
   * already been sent.  If they differ, we will send an updated mapping
   */
  Prepare_Label_Mapping_Attributes(g, s, &f->info, ds_attr, &dumb_attr,
    MPLS_BOOL_TRUE, MPLS_BOOL_TRUE, MPLS_BOOL_FALSE);

  if (!existing) {
    /*
     * this is the first time we've seen this mapping, add it to the database.
     * all future updates will modify this entry in place
     */
    /* LMp.16 */
    if (ldp_attr_insert_downstream(g, s, ds_attr) != MPLS_SUCCESS) {
      retval = MPLS_FAILURE;
      goto LMp_33;
    }
  }

  peer = MPLS_LIST_HEAD(&g->session);
  while (peer) {					/* LMp.17 */

    if (peer->state != LDP_STATE_OPERATIONAL) {
      goto next_peer;
    }

    /*
     * it is just as easy to walk the list of all upstream attr for this
     * peer as it is to the individual check to see if we have sent a
     * label mapping for this FEC LSP
     */

    /* FIXME: this section needs rework */

    /* LMp.22 - 27 */
    if ((us_list = ldp_attr_find_upstream_all2(g, peer, f))) {	/* LMp.23 */
      us_temp = MPLS_LIST_HEAD(us_list);
      while (us_temp) {
        /*
         * if we have sent a label mapping for the FEC, and that label mapping
         * was done in independent mode or is part of an LSP created as part
         * of an existing received label mapping
         */
        /* LMp.18 */
        if (us_temp->state == LDP_LSP_STATE_MAP_SENT) {
          LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV,
            LDP_TRACE_FLAG_BINDING, "Already sent mapping for %08x/%d to %s\n",
            r_attr->fecTlv.fecElArray[0].addressEl.address,
            r_attr->fecTlv.fecElArray[0].addressEl.preLen, peer->session_name);
          if ((!existing) || (existing->index == us_temp->ds_attr->index)) {
            LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV,
              LDP_TRACE_FLAG_BINDING, "Part of same LSP\n");
            /* are the received attrs the same as the ones we've already sent */
            if (ldp_attr_is_equal(us_temp, &dumb_attr,
                LDP_ATTR_HOPCOUNT | LDP_ATTR_PATH) != MPLS_BOOL_TRUE) {
              LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV,
                LDP_TRACE_FLAG_BINDING, "Propagating updated attrs\n");
              /* send an updated label mapping */
              if (ldp_label_mapping_with_xc(g, us_temp->session, f, &us_temp,
                  ds_attr) != MPLS_SUCCESS) {			/* LMp.24-26 */
                retval = MPLS_FAILURE;
                goto LMp_33;
              }
            }
          }
        }
        us_temp = MPLS_LIST_NEXT(us_list, us_temp, _fs);
      }
    }

    if ((peer->oper_distribution_mode == LDP_DISTRIBUTION_UNSOLICITED) &&
      (g->lsp_control_mode == LDP_CONTROL_ORDERED)) { /* LMp.19 */

      /*
       * if we're not merging and we have multiple ORDERED DU sessions,
       * we will need to start requesting labels after we propagate the mapping to
       * the first peer
       */
      if (need_request == MPLS_BOOL_TRUE) {
        if (ldp_attr_find_downstream_state2(g, peer, f,
            LDP_LSP_STATE_REQ_SENT) == NULL) {
          /*
           * we don't have a request for FEC to peer outstanding, make one
           */
          ds_temp = NULL;
          if (ldp_label_request_for_xc(g, peer, &f->info, NULL, &ds_temp) !=
            MPLS_SUCCESS) {
            retval = MPLS_FAILURE;
            goto LMp_33;
          }
        }
      } else {
        /*
         * We're in DU mode; either we're merging, or we're not merging and
         * this is the first peer we're propagating this mapping to
         */
        /* LMp.20-21,30 */
        us_attr = NULL;
        if (ldp_label_mapping_with_xc(g, peer, f, &us_attr, ds_attr) !=
          MPLS_SUCCESS) {
          retval = MPLS_FAILURE;
          goto LMp_33;
        }
        /*
         * if we're not merging, we will need to request a label for
         * the next DU peer
         */
        if (g->label_merge == MPLS_BOOL_FALSE) {
          need_request = MPLS_BOOL_TRUE;
        }
      }
    }

    /* LMp.28 */
    while ((us_temp = ldp_attr_find_upstream_state2(g, peer, f,
      LDP_LSP_STATE_REQ_RECV))) {

      if (peer->oper_distribution_mode == LDP_DISTRIBUTION_UNSOLICITED) {
        if (need_request == MPLS_BOOL_TRUE) {
          if (ldp_attr_find_downstream_state2(g, peer, f,
            LDP_LSP_STATE_REQ_SENT) == NULL) {
            /* 
             * we don't have a request for FEC to peer outstanding
             */
            ds_temp = NULL;
            if (ldp_label_request_for_xc(g, peer, &f->info, us_temp,
                &ds_temp) != MPLS_SUCCESS) {
              retval = MPLS_FAILURE;
              goto LMp_33;
            }
          }
        } else {
          if (ldp_label_mapping_with_xc(g, peer, f, &us_temp,
            ds_attr) != MPLS_SUCCESS) {
            retval = MPLS_FAILURE;
            goto LMp_33;
          }
        }
      } else {
        if ((us_list = ldp_attr_find_upstream_all2(g, peer, f))) {
          us_temp = MPLS_LIST_HEAD(us_list);
          while (us_temp) {
            if (us_temp->state == LDP_LSP_STATE_REQ_RECV) {
              if (need_request == MPLS_BOOL_TRUE) {
                if (ldp_attr_find_downstream_state2(g, peer, f,
                  LDP_LSP_STATE_REQ_SENT) == NULL) {
                  /*
                   * we don't have a request for FEC to peer outstanding
                   */
                  ds_temp = NULL;
                  if (ldp_label_request_for_xc(g, peer, &f->info, us_temp,
                      &ds_temp) != MPLS_SUCCESS) {
                    retval = MPLS_FAILURE;
                    goto LMp_33;
                  }
                }
              } else {
                if (ldp_label_mapping_with_xc(g, peer, f, &us_temp,
                    ds_attr) != MPLS_SUCCESS) {
                  retval = MPLS_FAILURE;
                  goto LMp_33;
                }
                /*
                 * if we're not merging, we will need to request a label for
                 * the next DU peer
                 */
                if (g->label_merge == MPLS_BOOL_FALSE) {
                  need_request = MPLS_BOOL_TRUE;
                }
              }
            }
            us_temp = MPLS_LIST_NEXT(us_list, us_temp, _fs);
          }
        }
      }
    }

  next_peer:
    peer = MPLS_LIST_NEXT(&g->session, peer, _global);
  }

LMp_33:
  LDP_EXIT(g->user_data, "ldp_label_mapping_process");
  return retval;

LMp_32:
  LDP_PRINT(g->user_data, "Receive_Label_Map_32: send release");
  if (ldp_label_release_send(g, s, ds_attr, LDP_NOTIF_NONE) != MPLS_SUCCESS) {
    retval = MPLS_FAILURE;
  }
  LDP_EXIT(g->user_data, "ldp_label_mapping_process");
  return retval;
}
Example #11
mpls_return_enum ldp_label_release_process(ldp_global * g, ldp_session * s,
  ldp_adj * a, ldp_entity * e, ldp_attr * r_attr, ldp_fec * f)
{
  mpls_bool label_exists = MPLS_BOOL_FALSE;
  ldp_attr *us_attr = NULL;
  ldp_attr *ds_attr = NULL;
  mpls_return_enum retval = MPLS_SUCCESS;

  LDP_ENTER(g->user_data, "ldp_label_release_process");

  LDP_TRACE_LOG(g->user_data, MPLS_TRACE_STATE_RECV, LDP_TRACE_FLAG_LABEL,
    "Release Recv from %s\n", s->session_name);

  if (r_attr->genLblTlvExists || r_attr->atmLblTlvExists
    || r_attr->frLblTlvExists) {
    label_exists = MPLS_BOOL_TRUE;
  }

  if (f) {
    /* LRl.1 is accomplished at LRl.10 */
    us_attr = ldp_attr_find_upstream_state2(g, s, f, LDP_LSP_STATE_MAP_SENT);
    if (!us_attr) {
      us_attr =
        ldp_attr_find_upstream_state2(g, s, f, LDP_LSP_STATE_WITH_SENT);
      if (!us_attr) {           /* LRl.2 */
        goto LRl_13;
      }
      /* LRl.3 is accomplished at LRl.10 */
    }

    if (g->label_merge == MPLS_BOOL_FALSE) { /* LRl.4 */
      goto LRl_6;
    }
    /* LRl.5 */
    if (ldp_attr_find_upstream_state_any2(g, f, LDP_LSP_STATE_MAP_SENT)) {
      goto LRl_10;
    }

  LRl_6:
    /* we can only propagate a release to the downstream attached to
       the upstream we found up top */
    /* LRl.6,7 */
    if (us_attr->ds_attr && us_attr->ds_attr->state == LDP_LSP_STATE_MAP_RECV) {
      ds_attr = us_attr->ds_attr;
    } else {
      goto LRl_10;
    }

    if (g->propagate_release == MPLS_BOOL_FALSE) { /* LRl.8 */
      goto LRl_10;
    }

    if (ldp_label_release_send(g, ds_attr->session, ds_attr,
      LDP_NOTIF_NONE) != MPLS_SUCCESS) { /* LRl.9 */
      retval = MPLS_FAILURE;
    }
    ldp_attr_remove_complete(g, ds_attr, MPLS_BOOL_FALSE);

  LRl_10:
    ldp_attr_remove_complete(g, us_attr, MPLS_BOOL_FALSE); /* LRl.10,11 */

  } else {
    LDP_PRINT(g->user_data, "No FEC in release, need to implement\n");
    MPLS_ASSERT(0);
  }

LRl_13:
  LDP_EXIT(g->user_data, "ldp_label_release_process");
  return retval;
}
Example #12
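/* Free an incoming label once its reference count has dropped to zero. */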
void ldp_inlabel_delete(ldp_inlabel * i)
{
  LDP_PRINT(g->user_data,"inlabel delete\n");
  MPLS_REFCNT_ASSERT(i, 0);
  mpls_free(i);
}