/* Allocate a buffer (input or output side) and deliver it to 'callback':
 * immediately if one is free, otherwise by queueing the request so that
 * zb_free_buf() serves it when a buffer is returned to the pool.
 * Returns RET_OK on success (immediate or queued), RET_ERROR if the
 * delayed-request freelist is exhausted. */
static zb_ret_t zb_get_buf_delayed(zb_callback_t callback, zb_uint8_t is_in)
{
  zb_buf_t *buf = zb_get_buf(is_in);

  if (buf)
  {
    /* Fast path: a buffer is available right now. */
    return ZB_SCHEDULE_CALLBACK(callback, ZB_REF_FROM_BUF(buf));
  }
  else
  {
    /* Slow path: remember the callback in the per-direction wait queue. */
    zb_buf_q_ent_t *q_ent;

    VERIFY_BUFS();
    ZB_STK_POP(ZG->sched.buf_freelist, next, q_ent);
    if (!q_ent)
    {
      /* No room to remember the request. */
      return RET_ERROR;
    }
    VERIFY_BUFS();
    q_ent->func = callback;
    if (is_in)
    {
      ZB_SL_LIST_INSERT_TAIL(ZG->sched.inbuf_queue, next, q_ent);
    }
    else
    {
      ZB_SL_LIST_INSERT_TAIL(ZG->sched.outbuf_queue, next, q_ent);
    }
    VERIFY_BUFS();
  }
  return RET_OK;
}
/*
 * Generate and send a route reply command frame.
 */
static zb_ret_t zb_nwk_mesh_send_rrep(zb_buf_t *cbuf, zb_uint8_t rreq_id, zb_uint16_t originator, zb_uint16_t responder, zb_uint8_t path_cost, zb_uint16_t sender_addr)
{
  zb_ret_t ret = RET_OK;
  zb_nwk_hdr_t *hdr;
  zb_nwk_cmd_rrep_t *reply;
  zb_uint8_t secure = 0;

#ifdef ZB_SECURITY
  /* Secure the frame only when authenticated and NWK security is enabled. */
  secure = (ZG->aps.authenticated && ZG->nwk.nib.secure_all_frames && ZG->nwk.nib.security_level);
#endif

  TRACE_MSG(TRACE_NWK1, ">> send_rrep cbuf %p rreq_id %d orig %d resp %d p_cost %hd s_addr %d",
            (FMT__P_D_D_D_H_D, cbuf, rreq_id, originator, responder, path_cost, sender_addr));

  /* Build the NWK header, then append the route reply command payload. */
  hdr = nwk_alloc_and_fill_hdr(cbuf, sender_addr, NULL, NULL, ZB_FALSE, secure, ZB_TRUE);
  reply = (zb_nwk_cmd_rrep_t *)nwk_alloc_and_fill_cmd(cbuf, ZB_NWK_CMD_ROUTE_REPLY, sizeof(zb_nwk_cmd_rrep_t));

  reply->opt = 0;
  reply->rreq_id = rreq_id;
  reply->originator = originator;
  reply->responder = responder;
  reply->path_cost = path_cost;
  hdr->radius = (zb_uint8_t)(ZB_NIB_MAX_DEPTH() << 1);
  /* Addresses go on the air little-endian. */
  ZB_NWK_ADDR_TO_LE16(reply->originator);
  ZB_NWK_ADDR_TO_LE16(reply->responder);

  /* Hand the finished frame to the forwarding routine. */
  ZB_SET_BUF_PARAM(cbuf, ZB_NWK_INTERNAL_NSDU_HANDLE, zb_uint8_t);
  ZB_SCHEDULE_CALLBACK(zb_nwk_forward, ZB_REF_FROM_BUF(cbuf));

  TRACE_MSG(TRACE_NWK1, "<< snd_rrep %d", (FMT__D, ret));
  return ret;
}
/* Allocate a buffer from the shared pool.
 * The pool is logically split into input and output halves: each direction
 * may hold at most ZB_BUFS_LIMIT buffers so one side cannot starve the
 * other (deadlock avoidance).
 * @param is_in - 1 to account the buffer as input, 0 as output
 * @return pointer to the buffer, or NULL when the pool is empty or the
 *         per-direction limit is reached.
 */
static zb_buf_t *zb_get_buf(zb_uint8_t is_in)
{
  zb_buf_t *buf = NULL;

  /* check that zb_init_buffers() was called */
  ZB_ASSERT(ZG->bpool.head || ZG->bpool.bufs_allocated[0] || ZG->bpool.bufs_allocated[1]);

  /* Logically pool divided into 2 parts: input or output. Do not allow one
     part to eat entire another part to exclude deadlock. */
  if (ZG->bpool.bufs_allocated[is_in] < ZB_BUFS_LIMIT)
  {
    buf = ZG->bpool.head;
    if (buf)
    {
      VERIFY_BUFS();
      /* Unlink from the free list and reset the header. */
      ZG->bpool.head = buf->u.next;
      VERIFY_BUFS();
      ZB_BZERO(&buf->u, sizeof(buf->u));
      ZG->bpool.bufs_allocated[is_in]++;
      ZB_ASSERT(ZG->bpool.bufs_allocated[is_in] <= ZB_BUFS_LIMIT);
      buf->u.hdr.is_in_buf = is_in;
    }
  }
#ifdef ZB_DEBUG_BUFFERS
  /* Fix: the format string has 6 conversions and 6 arguments, but the
     descriptor was FMT__H_P_H_P_H (5) - use the 6-element descriptor so the
     trace arguments match. NOTE(review): confirm FMT__H_P_H_P_H_H is
     declared in the trace headers. */
  TRACE_MSG( TRACE_MAC1, "zb_get_buf %hd: buffer %p, ref %hd, head %p, allocated %hd / %hd",
             (FMT__H_P_H_P_H_H, is_in, buf, ZB_REF_FROM_BUF(buf), ZG->bpool.head,
              ZG->bpool.bufs_allocated[0], ZG->bpool.bufs_allocated[1]));
#endif
  return buf;
}
static void send_data() { zb_buf_t *buf = NULL; zb_apsde_data_req_t req; zb_uint8_t *ptr = NULL; zb_short_t i; buf = zb_get_out_buf(); req.dst_addr.addr_short = 0; /* send to ZC */ req.addr_mode = ZB_APS_ADDR_MODE_16_ENDP_PRESENT; req.tx_options = ZB_APSDE_TX_OPT_ACK_TX; req.radius = 1; req.profileid = 2; req.src_endpoint = 10; req.dst_endpoint = 10; buf->u.hdr.handle = 0x11; ZB_BUF_INITIAL_ALLOC(buf, 80, ptr); for (i = 0 ; i < ZB_TEST_DATA_SIZE ; ++i) { ptr[i] = i % 32 + '0'; } ZB_MEMCPY( ZB_GET_BUF_TAIL(buf, sizeof(req)), &req, sizeof(req)); TRACE_MSG(TRACE_APS3, "Sending apsde_data.request", (FMT__0)); ZB_SCHEDULE_CALLBACK(zb_apsde_data_request, ZB_REF_FROM_BUF(buf)); }
/* Return a buffer to the pool and, if a delayed buffer request is queued
 * for this direction, immediately re-allocate the buffer and schedule the
 * waiting callback with it.
 * @param buf - buffer to free (its is_in_buf flag selects the accounting side)
 */
void zb_free_buf(zb_buf_t *buf)
{
  /* do trace this function, because it can cause lack of out buffers */
  zb_buf_q_ent_t *ent = NULL;

  /* check that zb_init_buffers() was called */
  ZB_ASSERT(ZG->bpool.head || ZG->bpool.bufs_allocated[0] || ZG->bpool.bufs_allocated[1]);
  ZB_ASSERT(ZG->bpool.bufs_allocated[buf->u.hdr.is_in_buf] > 0);
  ZG->bpool.bufs_allocated[buf->u.hdr.is_in_buf]--;
#ifdef ZB_DEBUG_BUFFERS
  /* Fix: the argument list carried a stray 'buf->u.hdr' (a struct) that did
     not correspond to any conversion; the 5 remaining arguments now match
     the 5 conversions of FMT__P_H_H_H_H. */
  TRACE_MSG(TRACE_NWK3, "zb_free_buf %p, ref %hd, in buf %hi allocated in %hd out %hd",
            (FMT__P_H_H_H_H, buf, ZB_REF_FROM_BUF(buf), buf->u.hdr.is_in_buf,
             ZG->bpool.bufs_allocated[1], ZG->bpool.bufs_allocated[0]));
#endif
  VERIFY_BUF(buf);
  /* Push the buffer back onto the free list head. */
  buf->u.next = ZG->bpool.head;
  ZG->bpool.head = buf;
  VERIFY_BUFS();
  if (buf->u.hdr.is_in_buf)
  {
    /* if we need a buffer for rx packet, we should not pass it to some */
    /* other callback */
    if (!MAC_CTX().rx_need_buf)
    {
      ZB_SL_LIST_CUT_HEAD(ZG->sched.inbuf_queue, next, ent);
    }
  }
  else
  {
    ZB_SL_LIST_CUT_HEAD(ZG->sched.outbuf_queue, next, ent);
  }
  if (ent)
  {
    /* Serve the queued delayed-allocation request with the buffer we have
       just freed, and recycle the queue entry. */
    ZB_SCHEDULE_CALLBACK(ent->func, ZB_REF_FROM_BUF(zb_get_buf(buf->u.hdr.is_in_buf)));
    ZB_STK_PUSH(ZG->sched.buf_freelist, next, ent);
  }
#ifdef ZB_DEBUG_BUFFERS
  TRACE_MSG( TRACE_MAC1, "free_buf: %hd/%hd buf %p, next %p, head %p",
             (FMT__H_H_P_P_P, ZG->bpool.bufs_allocated[1], ZG->bpool.bufs_allocated[0],
              buf, buf->u.next, ZG->bpool.head));
#endif
}
/* Build a PAN descriptor from a received beacon frame and append it to the
 * active scan result buffer. The first beacon's buffer itself is reused to
 * hold the descriptor list; when the list buffer runs out of room the scan
 * is flagged to stop. */
void zb_mac_store_pan_desc(zb_buf_t *beacon_buf)
{
  zb_uint8_t *fcf_ptr = ZB_MAC_GET_FCF_PTR(ZB_BUF_BEGIN(beacon_buf));
  zb_mac_mhr_t parsed_mhr;
  zb_uint8_t parsed_len;
  zb_pan_descriptor_t desc;
  zb_pan_descriptor_t *dst;
  zb_buf_t *list_buf;
  zb_uint8_t stored_count;

  TRACE_MSG(TRACE_NWK1, ">>store_pan_desc %p", (FMT__P, beacon_buf));

  parsed_len = zb_parse_mhr(&parsed_mhr, fcf_ptr);
  TRACE_MSG(TRACE_NWK3, "add pan desc", (FMT__0));

  /* Fill the descriptor from the beacon MAC header and current PHY state. */
  desc.coord_addr_mode = ZB_FCF_GET_SRC_ADDRESSING_MODE(&parsed_mhr.frame_control);
  desc.coord_pan_id = parsed_mhr.src_pan_id;
  ZB_MEMCPY(&desc.coord_address, &parsed_mhr.src_addr, sizeof(union zb_addr_u));
  desc.logical_channel = ZB_MAC_GET_CURRENT_LOGICAL_CHANNEL();
  ZB_GET_SUPERFRAME(fcf_ptr, parsed_len, &desc.super_frame_spec);
  desc.gts_permit = 0; /* use ZB_MAC_GET_GTS_FIELDS() to get exact gts value. Zigbee uses beaconless mode, so gts is not used always */
  desc.link_quality = ZB_MAC_GET_LQI(beacon_buf);

  if (MAC_CTX().rt_ctx.active_scan.pan_desc_buf_param == ZB_UNDEFINED_BUFFER)
  {
    /* First beacon seen: repurpose its buffer as the descriptor list. */
    list_buf = beacon_buf;
    ZB_BUF_REUSE(list_buf);
    MAC_CTX().rt_ctx.active_scan.pan_desc_buf_param = ZB_REF_FROM_BUF(beacon_buf);
  }
  else
  {
    list_buf = ZB_BUF_FROM_REF(MAC_CTX().rt_ctx.active_scan.pan_desc_buf_param);
  }

  /* do not calculate pan descriptors number - it can be calculated using buffer length */
  /* in this check take into account size of scan confirm structure - descriptors will follow it */
  stored_count = ZB_BUF_LEN(list_buf) / sizeof(zb_pan_descriptor_t);
  if ( (ZB_BUF_GET_FREE_SIZE(list_buf) >= (sizeof(zb_pan_descriptor_t) + sizeof(zb_mac_scan_confirm_t)))
       && stored_count < ZB_ACTIVE_SCAN_MAX_PAN_DESC_COUNT )
  {
    ZB_BUF_ALLOC_RIGHT(list_buf, sizeof(zb_pan_descriptor_t), dst);
    ZB_MEMCPY(dst, &desc, sizeof(zb_pan_descriptor_t));
  }
  else
  {
    TRACE_MSG(TRACE_NWK3, "stop scan, no free space", (FMT__0));
    MAC_CTX().rt_ctx.active_scan.stop_scan = 1;
  }

  TRACE_MSG(TRACE_NWK1, "<<store_pan_desc", (FMT__0));
}
/* Debug helper: dump every buffer on the pool free list, starting from head. */
void trace_bufs()
{
  zb_buf_t *iter;

  TRACE_MSG(TRACE_MAC1, "buffers verify : head %p", (FMT__P, ZG->bpool.head));
  for (iter = ZG->bpool.head ; iter ; iter = iter->u.next)
  {
    TRACE_MSG(TRACE_MAC1, "buf %p %hd next %p", (FMT__P_H_P, iter, ZB_REF_FROM_BUF(iter), iter->u.next));
  }
}
zb_ret_t zb_mlme_ed_scan() { zb_ret_t ret = RET_OK; zb_mac_scan_confirm_t *scan_confirm; /* mac spec 7.5.2.1.1 ED channel scan - discard all frames received over the PHY data service (UBEC stack accepts only beacon frames) - check one-by-one all logical channels, if it is specified in the requested channel mask, switch to this channel, set phyCurrentChannel = new_channel_number; phyCurrentPage = 0 alwayes for ZB - perform ED measurement for current channel during [(aBaseSuperframeDuration * (2^n + 1)) symbols] time. - save maximum ED value to confirmation buffer - perform scan confirm on procedure finish */ TRACE_MSG(TRACE_MAC1, ">> zb_mlme_ed_scan", (FMT__0)); { zb_mlme_scan_params_t *scan_params = ZB_GET_BUF_PARAM(MAC_CTX().pending_buf, zb_mlme_scan_params_t); MAC_CTX().unscanned_channels = scan_params->channels; /* timeout is calculated in beacon intervals */ MAC_CTX().rt_ctx.ed_scan.scan_timeout = (1l << scan_params->scan_duration) + 1; } ZB_BUF_REUSE(MAC_CTX().pending_buf); scan_confirm = ZB_GET_BUF_PARAM(MAC_CTX().pending_buf, zb_mac_scan_confirm_t); ZB_ASSERT(scan_confirm); ZB_BZERO(scan_confirm, sizeof(zb_mac_scan_confirm_t)); scan_confirm->unscanned_channels = MAC_CTX().unscanned_channels; #ifndef ZB_NS_BUILD MAC_CTX().rt_ctx.ed_scan.channel_number = ZB_MAC_START_CHANNEL_NUMBER; MAC_CTX().rt_ctx.ed_scan.save_channel = MAC_CTX().current_channel; MAC_CTX().rt_ctx.ed_scan.max_rssi_value = 0; ret = ZB_SCHEDULE_ALARM(zb_mlme_scan_step, 0, MAC_CTX().rt_ctx.ed_scan.scan_timeout); #else /* ZB_NS_BUILD */ ZB_BZERO(&scan_confirm->list.energy_detect[0], sizeof(scan_confirm->list.energy_detect)); scan_confirm->result_list_size = ZB_MAC_SUPPORTED_CHANNELS; ret = ZB_SCHEDULE_CALLBACK(zb_mlme_scan_confirm, ZB_REF_FROM_BUF(MAC_CTX().pending_buf)); #ifdef ZB_CHANNEL_ERROR_TEST /* channel interference test, show energy on current channel */ TRACE_MSG(TRACE_MAC3, "ch_err_test %hd, logical_channel %hd, ch index %hd", (FMT__H_H_H, ZB_MAC_GET_CHANNEL_ERROR_TEST(), 
ZB_MAC_GET_CURRENT_LOGICAL_CHANNEL(), ZB_MAC_GET_CURRENT_LOGICAL_CHANNEL() - ZB_MAC_START_CHANNEL_NUMBER)); if (ZB_MAC_GET_CHANNEL_ERROR_TEST()) { scan_confirm->list.energy_detect[ZB_MAC_GET_CURRENT_LOGICAL_CHANNEL() - ZB_MAC_START_CHANNEL_NUMBER] = ZB_CHANNEL_BUSY_ED_VALUE + 1; } #endif #endif /* ZB_NS_BUILD */ TRACE_MSG(TRACE_MAC1, "<< zb_mlme_ed_scan, ret %i", (FMT__D, ret)); return ret; }
/* Debug helper: report if buffer 'b' is already present on the pool free
 * list (i.e. a double free is about to happen). */
void verify_buf(zb_buf_t *b)
{
  zb_buf_t *iter = ZG->bpool.head;

  while (iter)
  {
    if (iter == b)
    {
      TRACE_MSG(TRACE_MAC1, "ALREADY FREE buf %p %hd next %p", (FMT__P_H_P, iter, ZB_REF_FROM_BUF(iter), iter->u.next));
    }
    iter = iter->u.next;
  }
}
/* Reset a buffer for reuse: zero the whole header union, preserving only
 * the input/output accounting flag.
 * @param zbbuf - buffer to reset
 */
zb_void_t zb_buf_reuse(zb_buf_t *zbbuf)
{
  zb_uint8_t is_in_buf = zbbuf->u.hdr.is_in_buf;
#ifdef ZB_DEBUG_BUFFERS
  /* Fix: dropped the stray 'zbbuf->u.hdr' (struct) argument - the 5
     remaining arguments now match the 5 conversions of FMT__P_H_H_H_H. */
  TRACE_MSG(TRACE_NWK3, "zb_reuse_buf %p, ref %hd, in buf %hi allocated in %hd out %hd",
            (FMT__P_H_H_H_H, zbbuf, ZB_REF_FROM_BUF(zbbuf), zbbuf->u.hdr.is_in_buf,
             ZG->bpool.bufs_allocated[1], ZG->bpool.bufs_allocated[0]));
#endif
  ZB_BZERO(&zbbuf->u, sizeof(zbbuf->u));
  zbbuf->u.hdr.is_in_buf = is_in_buf;
}
/* 7.1.11.1 MLME-SCAN.request
 * Validate the scan parameters carried in the buffer referenced by 'param';
 * if they are valid and no scan is in progress, start handling the request.
 * Otherwise schedule an MLME-SCAN.confirm carrying the failure status. */
void zb_mlme_scan_request(zb_uint8_t param)
{
  zb_ret_t ret = RET_OK;
  zb_mlme_scan_params_t *params;
  zb_uint8_t scan_type;
  zb_uint8_t handle_scan_called = 0;
  TRACE_MSG(TRACE_MAC2, ">> zb_mlme_scan_request %hd", (FMT__H, param));
  params = ZB_GET_BUF_PARAM((zb_buf_t *)ZB_BUF_FROM_REF(param), zb_mlme_scan_params_t);
  ZB_ASSERT(params);
  MAC_CTX().mac_status = MAC_SUCCESS;
  scan_type = params->scan_type;
  /* scan_duration is bounded by ZB_MAX_SCAN_DURATION_VALUE; orphan scan
     does not use it, so it is exempt from the check */
  if (params->scan_duration > ZB_MAX_SCAN_DURATION_VALUE && scan_type != ORPHAN_SCAN)
  {
    ret = RET_ERROR;
    MAC_CTX().mac_status = MAC_INVALID_PARAMETER;
  }
  if ((ret == RET_OK)&&(!MAC_CTX().mlme_scan_in_progress))
  {
    /* process request immediately*/
    MAC_CTX().pending_buf = ZB_BUF_FROM_REF(param);
    ZB_SCHEDULE_CALLBACK(zb_handle_scan_request, param);
    handle_scan_called = 1;
  }
  if (!handle_scan_called)
  {
    /* Request was rejected (bad parameters) or another scan is running:
       report failure via scan confirm.
       NOTE(review): this path reads MAC_CTX().pending_buf, which is only
       set to THIS request's buffer when the request is accepted above.
       When a scan is already in progress, pending_buf still belongs to
       that scan, so the confirm is written into the in-progress scan's
       buffer and this request's buffer ('param') is never confirmed or
       freed - verify against callers and fix upstream if confirmed. */
    zb_mac_scan_confirm_t *scan_confirm;
    scan_confirm = ZB_GET_BUF_PARAM(MAC_CTX().pending_buf, zb_mac_scan_confirm_t);
    scan_confirm->status = (ret == RET_OK) ? MAC_SUCCESS : MAC_CTX().mac_status != MAC_SUCCESS ? MAC_CTX().mac_status : MAC_INVALID_PARAMETER;
    scan_confirm->scan_type = scan_type;
    ZB_SCHEDULE_CALLBACK(zb_mlme_scan_confirm, ZB_REF_FROM_BUF(MAC_CTX().pending_buf));
  }
  TRACE_MSG(TRACE_MAC2, "<< zb_mlme_scan_request", (FMT__0));
}
/* Resend a route request recorded in the rreq table.
 * Rebuilds the NWK header and route request command from the saved entry,
 * schedules it for forwarding, and releases the table entry once the retry
 * budget is exhausted.
 * @param cbuf - buffer to build the frame in
 * @param rreq - saved route request entry to retransmit
 */
static void zb_nwk_mesh_resend_rreq(zb_buf_t *cbuf, zb_nwk_rreq_t *rreq)
{
  zb_nwk_hdr_t *nwhdr;
  zb_nwk_cmd_rreq_t *rreq_cmd;
  int total;

  TRACE_MSG(TRACE_NWK1, ">> resend_rreq cbuf %p rreq %p", (FMT__P_P, cbuf, rreq));
  rreq->retries++;

  /* fill nwk header */
  ZB_BUF_INITIAL_ALLOC(cbuf, ZB_NWK_SHORT_HDR_SIZE(0), nwhdr);
  ZB_BZERO2(nwhdr->frame_control);
  ZB_NWK_FRAMECTL_SET_FRAME_TYPE_N_PROTO_VER(nwhdr->frame_control, ZB_NWK_FRAME_TYPE_COMMAND, ZB_PROTOCOL_VERSION);
  /*ZB_NWK_FRAMECTL_SET_DISCOVER_ROUTE(nwhdr->frame_control, 0); implied*/
  nwhdr->src_addr = rreq->originator;
  nwhdr->dst_addr = ZB_NWK_BROADCAST_ROUTER_COORDINATOR;
  nwhdr->radius = rreq->radius;
  nwhdr->seq_num = ZB_NIB_SEQUENCE_NUMBER();
  ZB_NIB_SEQUENCE_NUMBER_INC();

  /* fill route request cmd&payload */
  ZB_NWK_ALLOC_COMMAND_GET_PAYLOAD_PTR(cbuf, ZB_NWK_CMD_ROUTE_REQUEST, zb_nwk_cmd_rreq_t, rreq_cmd);
  /* Consistency: use the stack's ZB_MEMCPY wrapper (as the rest of this
     file does) instead of bare memcpy. */
  ZB_MEMCPY(rreq_cmd, &rreq->cmd, sizeof(*rreq_cmd));

  /* transmit route request packet */
  ZB_SET_BUF_PARAM(cbuf, ZB_NWK_INTERNAL_NSDU_HANDLE, zb_uint8_t);
  ZB_SCHEDULE_CALLBACK(zb_nwk_forward, ZB_REF_FROM_BUF(cbuf));

  /* Requests we originated get a larger retry budget than relayed ones. */
  total = ( rreq->originator == ZB_NIB_NETWORK_ADDRESS() ) ? ZB_MWK_INITIAL_RREQ_RETRIES : ZB_MWK_RREQ_RETRIES;
  TRACE_MSG(TRACE_NWK1, "sent %d times of %d total", (FMT__D_D, rreq->retries, total + 1));
  if ( rreq->retries > total )
  {
    /* Retry budget exhausted - release the rreq table entry. */
    NWK_ROUTING_ARRAY_PUT_ENT(ZG->nwk.nib.rreq, rreq, ZG->nwk.nib.rreq_cnt);
  }
  TRACE_MSG(TRACE_NWK1, "<< mesh_resend_rreq", (FMT__0));
}
/*
 * Process an incoming route reply.
 * First we need to make sure that we have a discovery table and routing table
 * entries for this route reply. If not or if path cost is greater than what we
 * have, then discard route reply.
 * If the route reply is for us, then end the route discovery process and send
 * out any frames that are buffered in the pending list. Otherwise, forward the
 * route reply to the sender of the route request as recorded in the discovery
 * table entry.
 * @param buf          - buffer holding the received frame; freed here unless
 *                       ownership is handed to zb_nwk_mesh_send_rrep()
 * @param nwk_hdr      - parsed NWK header of the frame
 * @param nwk_cmd_rrep - route reply command payload (fields modified in place
 *                       by the byte-order conversions below)
 */
void zb_nwk_mesh_rrep_handler(zb_buf_t *buf, zb_nwk_hdr_t *nwk_hdr, zb_nwk_cmd_rrep_t *nwk_cmd_rrep)
{
  zb_uint8_t path_cost;
  zb_nwk_route_discovery_t *disc_ent;
  zb_nwk_routing_t *routing_ent;
  zb_uint16_t src_addr;
  TRACE_MSG(TRACE_NWK1, ">> rrep_handler buf %p nwk_hdr %p nwk_cmd_rrep %p", (FMT__P_P_P, buf, nwk_hdr, nwk_cmd_rrep));
  /* parse mac header to get source address */
  nwk_get_mac_source_addr(buf, &src_addr);
  /* find proper discovery and routing table entries, calculate path cost */
  TRACE_MSG(TRACE_NWK1, "rrep cmd: rreq_id %hd orig %d resp %d", (FMT__H_D_D, nwk_cmd_rrep->rreq_id, nwk_cmd_rrep->originator, nwk_cmd_rrep->responder));
  /* calc path cost: link cost to the rrep sender plus the cost it reported */
  NWK_CALC_PATH_COST(nwk_hdr->src_addr, path_cost);
  TRACE_MSG(TRACE_NWK1, "path_cost %hd", (FMT__H, path_cost));
  path_cost += nwk_cmd_rrep->path_cost;
  TRACE_MSG(TRACE_NWK1, "total path_cost %hd", (FMT__H, path_cost));
  /* convert command addresses before matching them against table entries */
  ZB_NWK_ADDR_TO_LE16(nwk_cmd_rrep->responder);
  ZB_NWK_ADDR_TO_LE16(nwk_cmd_rrep->originator);
  NWK_ARRAY_FIND_ENT( ZG->nwk.nib.route_disc_table, disc_ent, (disc_ent->request_id == nwk_cmd_rrep->rreq_id) && (disc_ent->source_addr == nwk_cmd_rrep->originator) );
  NWK_ARRAY_FIND_ENT( ZG->nwk.nib.routing_table, routing_ent, (routing_ent->dest_addr == nwk_cmd_rrep->responder) );
  TRACE_MSG(TRACE_NWK1, "p_cost %d disc_ent %p r_ent %p residual_cost %hd", (FMT__D_P_P_H, path_cost, disc_ent, routing_ent, disc_ent ? disc_ent->residual_cost : 0));
  /* check response is meaningful for us */
  if ( !disc_ent || !routing_ent || path_cost > disc_ent->residual_cost )
  {
    TRACE_MSG(TRACE_NWK1, "drop rresp, no purpose ent fnd or path too long", (FMT__0));
    if ( !disc_ent || !routing_ent )
    {
      /* an orphaned half of the (discovery, routing) pair is useless - free it */
      TRACE_MSG(TRACE_NWK1, "one of disc or r ent is absent, free other", (FMT__0));
      if ( disc_ent )
      {
        NWK_ARRAY_PUT_ENT(ZG->nwk.nib.route_disc_table, disc_ent, ZG->nwk.nib.route_disc_table_cnt);
      }
      if ( routing_ent )
      {
        NWK_ARRAY_PUT_ENT(ZG->nwk.nib.routing_table, routing_ent, ZG->nwk.nib.routing_table_cnt);
      }
    }
    goto done;
  }
  /* update route and discovery entries: this reply carries the best path so far */
  disc_ent->residual_cost = path_cost;
  disc_ent->expiration_time = ZB_NWK_ROUTE_DISCOVERY_EXPIRY;
  routing_ent->next_hop_addr = src_addr;
  routing_ent->status = (routing_ent->status == ZB_NWK_ROUTE_STATE_DISCOVERY_UNDERWAY) ? ZB_NWK_ROUTE_STATE_VALIDATION_UNDERWAY : routing_ent->status;
  /* check if rrep is for us */
  if ( nwk_cmd_rrep->originator == ZB_NIB_NETWORK_ADDRESS() )
  {
    zb_ushort_t i;
    TRACE_MSG(TRACE_NWK1, "got rrepl for our rreq, snd wait pckts", (FMT__0));
    /* Route discovery is complete. Now we have a new route. Go through the
     * pending queue to find packets to be forwarded */
    for(i = 0; i < ZB_NWK_PENDING_TABLE_SIZE; i++)
    {
      if ( ZG->nwk.nib.pending_table[i].used && ZG->nwk.nib.pending_table[i].dest_addr == routing_ent->dest_addr )
      {
        TRACE_MSG(TRACE_NWK1, "fnd pkt to %d addr, send it", (FMT__D, routing_ent->dest_addr));
        ZB_SET_BUF_PARAM(ZG->nwk.nib.pending_table[i].buf, ZG->nwk.nib.pending_table[i].handle, zb_uint8_t);
        ZB_SCHEDULE_CALLBACK(zb_nwk_forward, ZB_REF_FROM_BUF(ZG->nwk.nib.pending_table[i].buf));
        NWK_ARRAY_PUT_ENT(ZG->nwk.nib.pending_table, &ZG->nwk.nib.pending_table[i], ZG->nwk.nib.pending_table_cnt);
      }
    }
    /* if the request was initiated by APS, confirm that route discovery succeeded */
    TRACE_MSG(TRACE_NWK1, "aps_rreq_addr %d dst_addr %d", (FMT__D_D, ZG->nwk.nib.aps_rreq_addr, routing_ent->dest_addr));
    if ( ZG->nwk.nib.aps_rreq_addr == routing_ent->dest_addr )
    {
#ifndef ZB_LIMITED_FEATURES
      NWK_ROUTE_DISCOVERY_CONFIRM(buf, ZB_NWK_STATUS_SUCCESS, 0xff);
#endif
      ZG->nwk.nib.aps_rreq_addr = -1;
    }
  }
  else
  {
    /* forward the reply one hop back toward the originator */
    TRACE_MSG(TRACE_NWK1, "frwd rrep to the %d address", (FMT__D, disc_ent->sender_addr));
    zb_nwk_mesh_send_rrep(buf, nwk_cmd_rrep->rreq_id, nwk_cmd_rrep->originator, nwk_cmd_rrep->responder, path_cost, disc_ent->sender_addr);
    /* zb_nwk_mesh_send_rrep is now responsible for buf */
    buf = NULL;
  }
done:
  if ( buf )
  {
    TRACE_MSG(TRACE_NWK1, "free buf %p", (FMT__P, buf));
    zb_free_buf(buf);
  }
  TRACE_MSG(TRACE_NWK1, "<< rrep_handler", (FMT__0));
}
/*
 * Generate and send a route request command frame.
 * Add route request entry into the rreq list to be able to track this request.
 * (The rreq-table tracking logic is currently disabled via #if 0 - the
 *  function always builds and forwards the frame and returns RET_OK.)
 * @param cbuf         - buffer to build the frame in; handed to zb_nwk_forward
 * @param nwk_cmd_rreq - original route request command (id and destination
 *                       are copied from it)
 * @param src_addr     - NWK source address to place in the header
 * @param seq_num      - original sequence number, reused when relaying
 * @param path_cost    - accumulated path cost to advertise
 * @param radius       - remaining broadcast radius
 */
static zb_ret_t zb_nwk_mesh_send_rreq(zb_buf_t *cbuf, zb_nwk_cmd_rreq_t *nwk_cmd_rreq, zb_uint16_t src_addr, zb_uint8_t seq_num, zb_uint8_t path_cost, zb_uint8_t radius)
{
  zb_ret_t ret = RET_OK;
  TRACE_MSG(TRACE_NWK1, ">> send_rreq cbuf %p rreq %p s_addr %d path_cost %hd radius %hd", (FMT__P_P_D_H_H, cbuf, nwk_cmd_rreq, src_addr, path_cost, radius));
#if 0
  /* check we have room in rreq table */
  if ( ZG->nwk.nib.rreq_cnt < ZB_NWK_RREQ_TABLE_SIZE )
#endif
  {
    zb_nwk_hdr_t *nwhdr;
    zb_nwk_cmd_rreq_t *rreq_cmd;
#if 0
    zb_nwk_rreq_t *rreq;
#endif
    zb_bool_t secure = ZB_FALSE;
#ifdef ZB_SECURITY
    /* secure the frame only when authenticated and NWK security is on */
    secure = (ZG->aps.authenticated && ZG->nwk.nib.secure_all_frames && ZG->nwk.nib.security_level);
#endif
    /* build the NWK header (broadcast to routers/coordinator), then the
       route request command payload */
    nwhdr = nwk_alloc_and_fill_hdr(cbuf, ZB_NWK_BROADCAST_ROUTER_COORDINATOR, NULL, NULL, ZB_FALSE, secure, ZB_TRUE);
    rreq_cmd = (zb_nwk_cmd_rreq_t *)nwk_alloc_and_fill_cmd(cbuf, ZB_NWK_CMD_ROUTE_REQUEST, sizeof(zb_nwk_cmd_rreq_t));
    rreq_cmd->opt = 0;
    rreq_cmd->rreq_id = nwk_cmd_rreq->rreq_id;
    rreq_cmd->dest_addr = nwk_cmd_rreq->dest_addr;
    ZB_NWK_ADDR_TO_LE16(rreq_cmd->dest_addr);
    rreq_cmd->path_cost = path_cost;
    nwhdr->radius = radius;
    nwhdr->src_addr = src_addr;
    /* Not sure it is right, but let's assign original seq_num. Else request
     * can be dropped as a dup at receiver's side.
     */
    if (src_addr != ZB_NIB_NETWORK_ADDRESS())
    {
      nwhdr->seq_num = seq_num;
    }
#if 0
    /* Save info to retransmit request ZB_MWK_RREQ_RETRIES times */
    NWK_ROUTING_ARRAY_GET_ENT(ZG->nwk.nib.rreq, rreq, ZG->nwk.nib.rreq_cnt);
    ZB_ASSERT(rreq);
    if ( rreq )
    {
      rreq->originator = src_addr;
      rreq->radius = radius;
      rreq->retries = 1;
      memcpy(&rreq->cmd, rreq_cmd, sizeof(rreq->cmd));
      /* schedule resend function */
      if ( ZG->nwk.nib.rreq_cnt == 1 )
      {
        ZB_SCHEDULE_ALARM_CANCEL(zb_nwk_mesh_expiry_rreq, ZB_ALARM_ANY_PARAM);
        ZB_SCHEDULE_ALARM(zb_nwk_mesh_expiry_rreq, -1, ZB_NWK_RREQ_RETRY_INTERVAL);
      }
    }
#endif
    /* transmit route request packet */
    ZB_SET_BUF_PARAM(cbuf, ZB_NWK_INTERNAL_NSDU_HANDLE, zb_uint8_t);
    ZB_SCHEDULE_CALLBACK(zb_nwk_forward, ZB_REF_FROM_BUF(cbuf));
  }
#if 0
  else
  {
    zb_free_buf(cbuf);
    TRACE_MSG(TRACE_NWK1, "rreq buffer is full", (FMT__0));
    ret = RET_NO_MEMORY;
  }
#endif
  TRACE_MSG(TRACE_NWK1, "<< send_rreq %d", (FMT__D, ret));
  return ret;
}
/* this is a universal routine for ed/active/orphan scans:
 * picks the next channel from the unscanned mask, switches to it (sending a
 * beacon request for active scans), and re-arms itself as an alarm; when no
 * channels remain, fills in and schedules the scan confirm. */
void zb_mlme_scan_step(zb_uint8_t param)
{
  zb_uint8_t channel_number;
  zb_mlme_scan_params_t *scan_params;
  zb_ret_t ret = RET_OK;
  zb_uint16_t timeout;
  zb_uint32_t *unscanned_channels;
  TRACE_MSG(TRACE_MAC1, ">> zb_mlme_scan_step", (FMT__0));
  ZVUNUSED(param);
  channel_number = ZB_MAC_START_CHANNEL_NUMBER;
  scan_params = ZB_GET_BUF_PARAM(MAC_CTX().pending_buf, zb_mlme_scan_params_t);
  /* Table 2.80 Fields of the Mgmt_NWK_Disc_req Command (the other scans
     requests are using the same parameters) A value used to calculate the
     length of time to spend scanning each channel. The time spent scanning
     each channel is (aBaseSuperframeDuration * (2^n + 1)) symbols, where n
     is the value of the ScanDuration parameter. */
  timeout = (1l << scan_params->scan_duration) + 1;
  MAC_CTX().mlme_scan_in_progress = 1;
  unscanned_channels = &MAC_CTX().unscanned_channels;
  /* find the first still-unscanned channel, scan it, and stop the loop */
  for (;channel_number < ZB_MAC_START_CHANNEL_NUMBER + ZB_MAC_MAX_CHANNEL_NUMBER;channel_number++)
  {
    if (*unscanned_channels & 1l<<channel_number)
    {
      TRACE_MSG(TRACE_MAC2, "set channel %hd", (FMT__H, channel_number));
      ZB_TRANSCEIVER_SET_CHANNEL(channel_number);
      *unscanned_channels &=~(1l<<channel_number);
      if (scan_params->scan_type == ACTIVE_SCAN)
      {
        ret = zb_beacon_request_command();
        if (ret == RET_OK)
        {
          /* check beacon request TX status */
          /* There's nothing to do during active scan, so, synchronous */
          ret = zb_check_cmd_tx_status();
        }
      }
      /* re-arm this routine to move to the next channel after 'timeout' */
      ZB_SCHEDULE_ALARM_CANCEL(zb_mlme_scan_step, 0);
      ret = ZB_SCHEDULE_ALARM(zb_mlme_scan_step, 0, timeout);
      break;
    }
  }
  if (channel_number == (ZB_MAC_START_CHANNEL_NUMBER + ZB_MAC_MAX_CHANNEL_NUMBER))
  {
    /* all channels processed - finish the scan */
    zb_mac_scan_confirm_t *scan_confirm;
    /* There's no need to restore channel after active or orphan scan,
       because we will choose new channel, according to scan results. */
    /* ZB_TRANSCEIVER_SET_CHANNEL(MAC_CTX().rt_ctx.ed_scan.save_channel);*/
    scan_confirm = ZB_GET_BUF_PARAM(MAC_CTX().pending_buf, zb_mac_scan_confirm_t);
    TRACE_MSG(TRACE_MAC3, "beacon found %hd", (FMT__H, MAC_CTX().rt_ctx.active_scan.beacon_found));
    if (scan_params->scan_type == ED_SCAN || MAC_CTX().rt_ctx.active_scan.beacon_found || MAC_CTX().rt_ctx.orphan_scan.got_realignment)
    {
      scan_confirm->status = MAC_SUCCESS;
      /* Q: do we really need to zero here? What about got_realignment?
       *
       * A: I think yes, because it is the only indication
       * for NO_BEACON status, that will not affect ED or ORPHAN scans. ED just
       * doesn't need any packets, and ORPHAN needs a realignment command that
       * is processed in appropriate function */
      MAC_CTX().rt_ctx.active_scan.beacon_found = 0;
    }
    else
    {
      scan_confirm->status = MAC_NO_BEACON;
    }
#ifdef ZB_MAC_TESTING_MODE
    {
      /* NOTE(review): 'desc_count' is not declared anywhere in this
         function - this block cannot compile when ZB_MAC_TESTING_MODE is
         defined. Verify the intended source of the descriptor count. */
      scan_confirm->result_list_size = desc_count;
    }
#endif
    scan_confirm->scan_type = scan_params->scan_type;
    MAC_CTX().mlme_scan_in_progress = 0;
    ZB_SCHEDULE_CALLBACK(zb_mlme_scan_confirm, ZB_REF_FROM_BUF(MAC_CTX().pending_buf));
  }
  TRACE_MSG(TRACE_MAC1, "<< zb_mlme_scan_step", (FMT__0));
}