/**
 * Allocate a buffer from the global pool.
 *
 * @param is_in - 1 to account the buffer against the input quota, 0 against
 *                the output quota
 * @return pointer to the allocated buffer, or NULL when the free list is
 *         empty or the per-direction quota (ZB_BUFS_LIMIT) is exhausted
 */
static zb_buf_t *zb_get_buf(zb_uint8_t is_in)
{
  zb_buf_t *buf = NULL;
  /* check that zb_init_buffers() was called */
  ZB_ASSERT(ZG->bpool.head || ZG->bpool.bufs_allocated[0] || ZG->bpool.bufs_allocated[1]);
  /* Logically pool divided into 2 parts: input or output. Do not allow one
     part to eat entire another part to exclude deadlock. */
  if (ZG->bpool.bufs_allocated[is_in] < ZB_BUFS_LIMIT)
  {
    buf = ZG->bpool.head;
    if (buf)
    {
      VERIFY_BUFS();
      /* unlink head of the free list */
      ZG->bpool.head = buf->u.next;
      VERIFY_BUFS();
      /* wipe the header union; payload area is left as-is */
      ZB_BZERO(&buf->u, sizeof(buf->u));
      ZG->bpool.bufs_allocated[is_in]++;
      ZB_ASSERT(ZG->bpool.bufs_allocated[is_in] <= ZB_BUFS_LIMIT);
      /* remember direction so zb_free_buf() decrements the right counter */
      buf->u.hdr.is_in_buf = is_in;
    }
  }
#ifdef ZB_DEBUG_BUFFERS
  /* NOTE(review): the format string names six values but FMT__H_P_H_P_H
     describes five slots - verify the trace descriptor matches the argument
     list (compare with FMT__H_P_H_P_H_H if such a descriptor exists). */
  TRACE_MSG( TRACE_MAC1, "zb_get_buf %hd: buffer %p, ref %hd, head %p, allocated %hd / %hd", (FMT__H_P_H_P_H, is_in, buf, ZB_REF_FROM_BUF(buf), ZG->bpool.head, ZG->bpool.bufs_allocated[0], ZG->bpool.bufs_allocated[1]));
#endif
  return buf;
}
/*
 * Sets up new route discovery operation.
 * Call rreq_handler to do a real discovery.
 *
 * @param cbuf      - buffer to build the RREQ in; if NULL, an output buffer
 *                    is requested asynchronously and discovery restarts via
 *                    zb_nwk_mesh_initiate_route_discovery()
 * @param dest_addr - short address of the route destination
 * @param radius    - NWK radius; 0 selects the default of 2 * max_depth
 */
void zb_nwk_mesh_route_discovery(zb_buf_t *cbuf, zb_uint16_t dest_addr, zb_uint8_t radius)
{
  zb_nwk_hdr_t *nwk_hdr;
  zb_nwk_cmd_rreq_t *nwk_cmd_rreq;

  TRACE_MSG(TRACE_NWK1, ">> route_discovery cbuf %p dest_addr %d radius %hd", (FMT__P_D_H, cbuf, dest_addr, radius));

  if ( cbuf )
  {
    /* alloc space for nwk header and rreq cmd: header grows from the middle
       of the buffer, the command is placed at the buffer tail */
    ZB_BUF_INITIAL_ALLOC(cbuf, ZB_NWK_SHORT_HDR_SIZE(0), nwk_hdr);
    ZB_BUF_ALLOC_RIGHT(cbuf, sizeof(*nwk_cmd_rreq), nwk_cmd_rreq);
    ZB_ASSERT(nwk_hdr && nwk_cmd_rreq);

    /* fill meaningful hdr and command parameters for rrreq handler */
    nwk_cmd_rreq->opt = 0;
    nwk_cmd_rreq->rreq_id = ZB_NWK_GET_RREQ_ID();
    nwk_cmd_rreq->dest_addr = dest_addr;
    nwk_cmd_rreq->path_cost = 0;   /* cost accumulates hop by hop */

    /* default radius = 2 * max depth */
    nwk_hdr->radius = radius ? radius : (zb_uint8_t)(ZG->nwk.nib.max_depth << 1);
    nwk_hdr->src_addr = ZB_NIB_NETWORK_ADDRESS();

    zb_nwk_mesh_rreq_handler(cbuf, nwk_hdr, nwk_cmd_rreq);
  }
  else
  {
    /* no buffer yet: re-enter through the delayed-buffer callback */
    ZB_GET_OUT_BUF_DELAYED(zb_nwk_mesh_initiate_route_discovery);
  }

  TRACE_MSG(TRACE_NWK1, "<< route_discovery", (FMT__0));
}
/**
 * Drop 'size' bytes from the head of the buffer's data region.
 *
 * @param zbbuf - buffer to trim
 * @param size  - number of bytes to remove from the front
 * @return pointer to the new beginning of the buffer data
 */
void *zb_buf_cut_left(zb_buf_t *zbbuf, zb_uint8_t size)
{
  ZB_ASSERT(ZB_BUF_LEN(zbbuf) >= (size));
  /* advance the data window: move its start forward and shrink its length */
  zbbuf->u.hdr.data_offset += size;
  zbbuf->u.hdr.len -= size;
  return (void *)ZB_BUF_BEGIN(zbbuf);
}
/**
 * Return a buffer to the global pool and, if somebody is waiting for a
 * buffer of this direction, immediately re-allocate it and schedule the
 * waiter's callback.
 *
 * @param buf - buffer to free (its is_in_buf flag selects the quota counter
 *              and the waiters queue)
 */
void zb_free_buf(zb_buf_t *buf)
{
  /* do trace this function, because it can cause lack of out buffers */
  zb_buf_q_ent_t *ent = NULL;

  /* check that zb_init_buffers() was called */
  ZB_ASSERT(ZG->bpool.head || ZG->bpool.bufs_allocated[0] || ZG->bpool.bufs_allocated[1]);
  ZB_ASSERT(ZG->bpool.bufs_allocated[buf->u.hdr.is_in_buf] > 0);
  ZG->bpool.bufs_allocated[buf->u.hdr.is_in_buf]--;

#ifdef ZB_DEBUG_BUFFERS
  /* FIX: removed stray aggregate argument 'buf->u.hdr,' - it made the
     argument count exceed the FMT__P_H_H_H_H descriptor and passed a struct
     through varargs (undefined behavior). */
  TRACE_MSG(TRACE_NWK3, "zb_free_buf %p, ref %hd, in buf %hi allocated in %hd out %hd", (FMT__P_H_H_H_H, buf, ZB_REF_FROM_BUF(buf), buf->u.hdr.is_in_buf, ZG->bpool.bufs_allocated[1], ZG->bpool.bufs_allocated[0]));
#endif

  VERIFY_BUF(buf);
  /* push the buffer onto the free list head */
  buf->u.next = ZG->bpool.head;
  ZG->bpool.head = buf;
  VERIFY_BUFS();

  if (buf->u.hdr.is_in_buf)
  {
    /* if we need a buffer for rx packet, we should not pass it to some */
    /* other callback */
    if (!MAC_CTX().rx_need_buf)
      ZB_SL_LIST_CUT_HEAD(ZG->sched.inbuf_queue, next, ent);
  }
  else
  {
    ZB_SL_LIST_CUT_HEAD(ZG->sched.outbuf_queue, next, ent);
  }

  if (ent)
  {
    /* hand the just-freed buffer straight to the first waiter */
    ZB_SCHEDULE_CALLBACK(ent->func, ZB_REF_FROM_BUF(zb_get_buf(buf->u.hdr.is_in_buf)));
    ZB_STK_PUSH(ZG->sched.buf_freelist, next, ent);
  }

#ifdef ZB_DEBUG_BUFFERS
  TRACE_MSG( TRACE_MAC1, "free_buf: %hd/%hd buf %p, next %p, head %p", (FMT__H_H_P_P_P, ZG->bpool.bufs_allocated[1], ZG->bpool.bufs_allocated[0], buf, buf->u.next, ZG->bpool.head));
#endif
}
/**
 * Start an ED (energy-detect) channel scan.
 *
 * Scan parameters are taken from MAC_CTX().pending_buf; the same buffer is
 * reused to carry the scan confirm. On hardware builds the scan proceeds
 * channel by channel via zb_mlme_scan_step alarms; on ZB_NS_BUILD the
 * confirm is completed immediately with a zeroed energy list.
 *
 * @return RET_OK on success, or the scheduler's error code
 */
zb_ret_t zb_mlme_ed_scan()
{
  zb_ret_t ret = RET_OK;
  zb_mac_scan_confirm_t *scan_confirm;
  /* mac spec 7.5.2.1.1 ED channel scan
     - discard all frames received over the PHY data service (UBEC stack
       accepts only beacon frames)
     - check one-by-one all logical channels, if it is specified in the
       requested channel mask, switch to this channel, set
       phyCurrentChannel = new_channel_number; phyCurrentPage = 0 alwayes
       for ZB
     - perform ED measurement for current channel during
       [(aBaseSuperframeDuration * (2^n + 1)) symbols] time.
     - save maximum ED value to confirmation buffer
     - perform scan confirm on procedure finish */
  TRACE_MSG(TRACE_MAC1, ">> zb_mlme_ed_scan", (FMT__0));
  {
    /* pull request parameters before the buffer is reused below */
    zb_mlme_scan_params_t *scan_params = ZB_GET_BUF_PARAM(MAC_CTX().pending_buf, zb_mlme_scan_params_t);
    MAC_CTX().unscanned_channels = scan_params->channels;
    /* timeout is calculated in beacon intervals */
    MAC_CTX().rt_ctx.ed_scan.scan_timeout = (1l << scan_params->scan_duration) + 1;
  }
  /* the request buffer now becomes the confirm buffer */
  ZB_BUF_REUSE(MAC_CTX().pending_buf);
  scan_confirm = ZB_GET_BUF_PARAM(MAC_CTX().pending_buf, zb_mac_scan_confirm_t);
  ZB_ASSERT(scan_confirm);
  ZB_BZERO(scan_confirm, sizeof(zb_mac_scan_confirm_t));
  scan_confirm->unscanned_channels = MAC_CTX().unscanned_channels;
#ifndef ZB_NS_BUILD
  /* hardware build: walk the channels asynchronously, restoring the
     current channel when done */
  MAC_CTX().rt_ctx.ed_scan.channel_number = ZB_MAC_START_CHANNEL_NUMBER;
  MAC_CTX().rt_ctx.ed_scan.save_channel = MAC_CTX().current_channel;
  MAC_CTX().rt_ctx.ed_scan.max_rssi_value = 0;
  ret = ZB_SCHEDULE_ALARM(zb_mlme_scan_step, 0, MAC_CTX().rt_ctx.ed_scan.scan_timeout);
#else /* ZB_NS_BUILD */
  /* network simulator build: no real radio, report zero energy everywhere
     and confirm at once */
  ZB_BZERO(&scan_confirm->list.energy_detect[0], sizeof(scan_confirm->list.energy_detect));
  scan_confirm->result_list_size = ZB_MAC_SUPPORTED_CHANNELS;
  ret = ZB_SCHEDULE_CALLBACK(zb_mlme_scan_confirm, ZB_REF_FROM_BUF(MAC_CTX().pending_buf));
#ifdef ZB_CHANNEL_ERROR_TEST
  /* channel interference test, show energy on current channel */
  TRACE_MSG(TRACE_MAC3, "ch_err_test %hd, logical_channel %hd, ch index %hd", (FMT__H_H_H, ZB_MAC_GET_CHANNEL_ERROR_TEST(), ZB_MAC_GET_CURRENT_LOGICAL_CHANNEL(), ZB_MAC_GET_CURRENT_LOGICAL_CHANNEL() - ZB_MAC_START_CHANNEL_NUMBER));
  if (ZB_MAC_GET_CHANNEL_ERROR_TEST())
  {
    /* mark the current channel as busy for the interference test */
    scan_confirm->list.energy_detect[ZB_MAC_GET_CURRENT_LOGICAL_CHANNEL() - ZB_MAC_START_CHANNEL_NUMBER] = ZB_CHANNEL_BUSY_ED_VALUE + 1;
  }
#endif
#endif /* ZB_NS_BUILD */
  TRACE_MSG(TRACE_MAC1, "<< zb_mlme_ed_scan, ret %i", (FMT__D, ret));
  return ret;
}
/**
 * Initial allocate space in buffer.
 *
 * Resets the buffer header (preserving its in/out direction flag) and
 * centers a data window of 'size' bytes inside the I/O area so later
 * header allocations can grow to the left and right.
 *
 * @param zbbuf - buffer
 * @param size  - size to allocate
 * @return pointer to the beginning of the allocated region
 */
zb_void_t *zb_buf_initial_alloc(zb_buf_t *zbbuf, zb_uint8_t size)
{
  /* the direction flag must survive the header wipe */
  zb_uint8_t dir_flag = zbbuf->u.hdr.is_in_buf;

  ZB_ASSERT((size) < ZB_IO_BUF_SIZE);
  ZB_BZERO(&zbbuf->u, sizeof(zbbuf->u));
  zbbuf->u.hdr.is_in_buf = dir_flag;

  /* center the data region within the buffer */
  zbbuf->u.hdr.data_offset = (ZB_IO_BUF_SIZE - (size)) / 2;
  zbbuf->u.hdr.len = size;

  return (void *)ZB_BUF_BEGIN(zbbuf);
}
/*
 * 7.3.6 sends orphan notification command
 * return RET_OK, RET_ERROR
 */
zb_ret_t zb_orphan_notification_command()
{
  zb_ret_t ret;
  zb_uint8_t mhr_len;
  zb_uint8_t *ptr = NULL;
  zb_mac_mhr_t mhr;
  /*
    Orphan notification command
    1. Fill MHR fields
    - set dst pan id = 0xffff
    - set dst addr = 0xffff
    2. Fill FCF
    - set frame pending = 0, ack req = 0, security enabled = 0
    - set dst addr mode to ZB_ADDR_16BIT_DEV_OR_BROADCAST
    - set src addr mode to ZB_ADDR_64BIT_DEV
    3. Set command frame id = 0x06 (Orphan notification)
  */
  TRACE_MSG(TRACE_MAC2, ">>orphan_notif_cmd", (FMT__0));
  /* 64-bit source, 16-bit broadcast destination, PAN ID compression on */
  mhr_len = zb_mac_calculate_mhr_length(ZB_ADDR_64BIT_DEV, ZB_ADDR_16BIT_DEV_OR_BROADCAST, 1);
  {
    /* MHR plus one byte for the command frame identifier */
    zb_uint8_t packet_length = mhr_len + 1;
    ZB_BUF_INITIAL_ALLOC(MAC_CTX().operation_buf, packet_length, ptr);
    ZB_ASSERT(ptr);
    ZB_BZERO(ptr, packet_length);
  }
  /* TODO: optimize FC fill */
  ZB_BZERO2(mhr.frame_control);
  ZB_FCF_SET_FRAME_TYPE(mhr.frame_control, MAC_FRAME_COMMAND);
  ZB_FCF_SET_DST_ADDRESSING_MODE(mhr.frame_control, ZB_ADDR_16BIT_DEV_OR_BROADCAST);
  ZB_FCF_SET_SRC_ADDRESSING_MODE(mhr.frame_control, ZB_ADDR_64BIT_DEV);
  ZB_FCF_SET_PANID_COMPRESSION_BIT(mhr.frame_control, 1);
  ZB_FCF_SET_FRAME_VERSION(mhr.frame_control, MAC_FRAME_VERSION);
  /* 7.2.1 General MAC frame format */
  mhr.seq_number = ZB_MAC_DSN();
  ZB_INC_MAC_DSN();
  /* broadcast PAN and short address per the orphan notification rules */
  mhr.dst_pan_id = ZB_BROADCAST_PAN_ID;
  mhr.dst_addr.addr_short = ZB_MAC_SHORT_ADDR_NO_VALUE;
  ZB_IEEE_ADDR_COPY(mhr.src_addr.addr_long, ZB_PIB_EXTENDED_ADDRESS());
  zb_mac_fill_mhr(ptr, &mhr);
  /* command id byte follows the MHR */
  *(ptr + mhr_len) = MAC_CMD_ORPHAN_NOTIFICATION;
  MAC_ADD_FCS(MAC_CTX().operation_buf);
  ret = ZB_TRANS_SEND_COMMAND(mhr_len, MAC_CTX().operation_buf);
  TRACE_MSG(TRACE_MAC2, "<<orphan_notif_cmd %hd", (FMT__H, ret));
  return ret;
}
/**
 * Erase one scratchpad FLASH page used as NVRAM.
 *
 * Direct SFR access - presumably a Silicon Labs C8051-family MCU
 * (FLSCL/PSCTL registers); confirm against the target's datasheet.
 *
 * @param page - scratchpad page to erase, must be 0 or 1
 */
void zb_erase_nvram(zb_uint8_t page)
{
  FLSCL |= 0x01;   /* enable FLASH write/erase */
  PSCTL |= 0x03;   /* enable erasing FLASH */
  PSCTL |= 0x04;   /* redirect erasing FLASH to scratchpad FLASH */
  /* writing anywhere initiates erase of the whole page, scratch pad pages
     are 128 instead of 256 bytes */
  ZB_ASSERT(page==0||page==1);
  /* dummy write inside the target page triggers the page erase */
  ZB_XDATA_MEM[129*page] = 0x00;
  PSCTL &= ~0x07;  /* set PSWE = PSEE = SFLE = 0 to disable all access to scratchpad FLASH in place of xdata*/
  FLSCL &= ~0x01;  /* disable FLASH write/erase */
}
zb_ret_t zb_mlme_active_scan() { zb_ret_t ret = RET_OK; zb_mlme_scan_params_t *scan_params; zb_uint8_t channel_number; /* mac spec 7.5.2.1.2 Active channel scan - set macPANId to 0xffff in order to accept all incoming beacons - switch to next channel - send beacon request, mac spec 7.3.7 Beacon request command - enable receiver for [aBaseSuperframeDuration * (2^n + 1)] symbols == (2^n + 1)Beacon_Intervals, n == request.ScanDuration; accept only beacon frames - use mode macAutoRequest == FALSE: send each beacon to the higher layer using MLME-BEACON-NOTIFY indication. Beacon frame can contain payload - if frame_control.Security Enabled == 1, unsecure the beacon frame (mac spec 7.5.8.2.3) --- not supported now - if at least 1 beacon request was successfully sent but no beacons were found, set status NO_BEACON */ TRACE_MSG(TRACE_MAC1, ">> zb_mlme_active_scan", (FMT__0)); scan_params = ZB_GET_BUF_PARAM(MAC_CTX().pending_buf, zb_mlme_scan_params_t); ZB_ASSERT(scan_params); TRACE_MSG(TRACE_MAC1, "idle state, set beacon found == 0", (FMT__0)); MAC_CTX().rt_ctx.active_scan.beacon_found = 0; #ifdef ZB_MAC_TESTING_MODE MAC_CTX().rt_ctx.active_scan.pan_desc_buf_param = ZB_UNDEFINED_BUFFER; #endif channel_number = ZB_MAC_START_CHANNEL_NUMBER; TRACE_MSG(TRACE_MAC3, "set beacon mode ret %d, param channels 0x%x", (FMT__D_D, ret, scan_params->channels)); MAC_CTX().unscanned_channels = scan_params->channels; #ifdef ZB_MAC_TESTING_MODE if (MAC_CTX().rt_ctx.active_scan.stop_scan) { ZB_SCHEDULE_ALARM_CANCEL(zb_mac_scan_timeout, 0); ret = RET_OK; break; } #endif TRACE_MSG(TRACE_MAC2, "chan mask %x %x , chan %hd", (FMT__D_D_H, ((zb_uint16_t*)&scan_params->channels)[0], ((zb_uint16_t*)&scan_params->channels)[1], channel_number)); ZB_SCHEDULE_CALLBACK(zb_mlme_scan_step,0); TRACE_MSG(TRACE_MAC1, "<< zb_mlme_active_scan, ret %i", (FMT__D, ret)); return ret; }
/* 7.1.11.1 MLME-SCAN.request */
/**
 * Accept an MLME-SCAN.request.
 *
 * Validates the scan duration and, if no scan is in progress, schedules
 * zb_handle_scan_request on this buffer. Otherwise confirms failure
 * (MAC_INVALID_PARAMETER or the already-set status) on the request's own
 * buffer.
 *
 * @param param - reference to the buffer carrying zb_mlme_scan_params_t
 */
void zb_mlme_scan_request(zb_uint8_t param)
{
  zb_ret_t ret = RET_OK;
  zb_mlme_scan_params_t *params;
  zb_uint8_t scan_type;
  zb_uint8_t handle_scan_called = 0;

  TRACE_MSG(TRACE_MAC2, ">> zb_mlme_scan_request %hd", (FMT__H, param));
  params = ZB_GET_BUF_PARAM((zb_buf_t *)ZB_BUF_FROM_REF(param), zb_mlme_scan_params_t);
  ZB_ASSERT(params);
  MAC_CTX().mac_status = MAC_SUCCESS;
  scan_type = params->scan_type;

  /* orphan scan ignores ScanDuration; all other types must respect the cap */
  if (params->scan_duration > ZB_MAX_SCAN_DURATION_VALUE && scan_type != ORPHAN_SCAN)
  {
    ret = RET_ERROR;
    MAC_CTX().mac_status = MAC_INVALID_PARAMETER;
  }

  if ((ret == RET_OK)&&(!MAC_CTX().mlme_scan_in_progress))
  {
    /* process request immediately*/
    MAC_CTX().pending_buf = ZB_BUF_FROM_REF(param);
    ZB_SCHEDULE_CALLBACK(zb_handle_scan_request, param);
    handle_scan_called = 1;
  }

  if (!handle_scan_called)
  {
    /* FIX: confirm on the request's own buffer. The old code used
       MAC_CTX().pending_buf, which is only assigned in the success branch
       above - when a scan is already in progress that clobbered the
       in-flight scan's confirm parameters and answered on the wrong
       buffer. */
    zb_buf_t *req_buf = (zb_buf_t *)ZB_BUF_FROM_REF(param);
    zb_mac_scan_confirm_t *scan_confirm;

    scan_confirm = ZB_GET_BUF_PARAM(req_buf, zb_mac_scan_confirm_t);
    /* NOTE(review): when the request is rejected because a scan is in
       progress, ret is still RET_OK here, so the status reported is
       MAC_SUCCESS - verify that this is the intended confirm status. */
    scan_confirm->status = (ret == RET_OK) ? MAC_SUCCESS :
      MAC_CTX().mac_status != MAC_SUCCESS ? MAC_CTX().mac_status : MAC_INVALID_PARAMETER;
    scan_confirm->scan_type = scan_type;
    ZB_SCHEDULE_CALLBACK(zb_mlme_scan_confirm, param);
  }
  TRACE_MSG(TRACE_MAC2, "<< zb_mlme_scan_request", (FMT__0));
}
/*
 * This function is called when we got new buffer to initiate route
 * discovery.
 *
 * Looks up the pending-table entry that is waiting for a buffer, extracts
 * the destination address from its stored NWK header and starts route
 * discovery with the default radius. If no entry is waiting, the buffer is
 * released (and the situation is asserted as impossible).
 *
 * @param param - reference to the freshly allocated buffer
 */
void zb_nwk_mesh_initiate_route_discovery(zb_uint8_t param)
{
  zb_nwk_pend_t *pend;
  zb_buf_t *buf = (zb_buf_t *)ZB_BUF_FROM_REF(param);

  TRACE_MSG(TRACE_NWK1, ">> initiate_route_discovery %hd", (FMT__H, param));

  NWK_ARRAY_FIND_ENT(ZG->nwk.nib.pending_table, pend, pend->waiting_buf);
  if ( !pend )
  {
    /* nobody is waiting - should not happen */
    zb_free_buf(buf);
    TRACE_MSG(TRACE_NWK1, "perepil", (FMT__0));
    ZB_ASSERT(0);
  }
  else
  {
    zb_nwk_hdr_t *hdr = (zb_nwk_hdr_t *)ZB_BUF_BEGIN(pend->buf);
    zb_nwk_mesh_route_discovery(buf, hdr->dst_addr, 0);
    pend->waiting_buf = 0;   /* the entry got its buffer */
  }

  TRACE_MSG(TRACE_NWK1, "<< initiate_route_discovery", (FMT__0));
}
/*
 * Generate and send a route request command frame.
 * Add route request entry into the rreq list to be able to track this
 * request. (The rreq-table bookkeeping is currently disabled via #if 0.)
 *
 * @param cbuf         - buffer to build the RREQ frame in
 * @param nwk_cmd_rreq - template RREQ command (rreq_id / dest_addr copied
 *                       from it)
 * @param src_addr     - originator short address to put into the NWK header
 * @param seq_num      - original NWK sequence number, reused when relaying
 *                       someone else's request
 * @param path_cost    - accumulated path cost to store in the command
 * @param radius       - NWK radius for the broadcast
 * @return RET_OK (the RET_NO_MEMORY path is compiled out)
 */
static zb_ret_t zb_nwk_mesh_send_rreq(zb_buf_t *cbuf, zb_nwk_cmd_rreq_t *nwk_cmd_rreq, zb_uint16_t src_addr, zb_uint8_t seq_num, zb_uint8_t path_cost, zb_uint8_t radius)
{
  zb_ret_t ret = RET_OK;
  TRACE_MSG(TRACE_NWK1, ">> send_rreq cbuf %p rreq %p s_addr %d path_cost %hd radius %hd", (FMT__P_P_D_H_H, cbuf, nwk_cmd_rreq, src_addr, path_cost, radius));

#if 0
  /* check we have room in rreq table */
  if ( ZG->nwk.nib.rreq_cnt < ZB_NWK_RREQ_TABLE_SIZE )
#endif
  {
    zb_nwk_hdr_t *nwhdr;
    zb_nwk_cmd_rreq_t *rreq_cmd;
#if 0
    zb_nwk_rreq_t *rreq;
#endif
    zb_bool_t secure = ZB_FALSE;
#ifdef ZB_SECURITY
    /* secure the frame only after authentication and when the NIB demands it */
    secure = (ZG->aps.authenticated && ZG->nwk.nib.secure_all_frames && ZG->nwk.nib.security_level);
#endif
    /* broadcast to routers and the coordinator */
    nwhdr = nwk_alloc_and_fill_hdr(cbuf, ZB_NWK_BROADCAST_ROUTER_COORDINATOR, NULL, NULL, ZB_FALSE, secure, ZB_TRUE);
    rreq_cmd = (zb_nwk_cmd_rreq_t *)nwk_alloc_and_fill_cmd(cbuf, ZB_NWK_CMD_ROUTE_REQUEST, sizeof(zb_nwk_cmd_rreq_t));

    rreq_cmd->opt = 0;
    rreq_cmd->rreq_id = nwk_cmd_rreq->rreq_id;
    rreq_cmd->dest_addr = nwk_cmd_rreq->dest_addr;
    /* destination address travels little-endian on the wire */
    ZB_NWK_ADDR_TO_LE16(rreq_cmd->dest_addr);
    rreq_cmd->path_cost = path_cost;

    nwhdr->radius = radius;
    nwhdr->src_addr = src_addr;
    /* Not sure it is right, but let's assign original seq_num. Else request
     * can be dropped as a dup at receiver's side.
     */
    if (src_addr != ZB_NIB_NETWORK_ADDRESS())
    {
      nwhdr->seq_num = seq_num;
    }

#if 0
    /* Save info to retransmit request ZB_MWK_RREQ_RETRIES times */
    NWK_ROUTING_ARRAY_GET_ENT(ZG->nwk.nib.rreq, rreq, ZG->nwk.nib.rreq_cnt);
    ZB_ASSERT(rreq);
    if ( rreq )
    {
      rreq->originator = src_addr;
      rreq->radius = radius;
      rreq->retries = 1;
      memcpy(&rreq->cmd, rreq_cmd, sizeof(rreq->cmd));

      /* schedule resend function */
      if ( ZG->nwk.nib.rreq_cnt == 1 )
      {
        ZB_SCHEDULE_ALARM_CANCEL(zb_nwk_mesh_expiry_rreq, ZB_ALARM_ANY_PARAM);
        ZB_SCHEDULE_ALARM(zb_nwk_mesh_expiry_rreq, -1, ZB_NWK_RREQ_RETRY_INTERVAL);
      }
    }
#endif

    /* transmit route request packet */
    ZB_SET_BUF_PARAM(cbuf, ZB_NWK_INTERNAL_NSDU_HANDLE, zb_uint8_t);
    ZB_SCHEDULE_CALLBACK(zb_nwk_forward, ZB_REF_FROM_BUF(cbuf));
  }
#if 0
  else
  {
    zb_free_buf(cbuf);
    TRACE_MSG(TRACE_NWK1, "rreq buffer is full", (FMT__0));
    ret = RET_NO_MEMORY;
  }
#endif

  TRACE_MSG(TRACE_NWK1, "<< send_rreq %d", (FMT__D, ret));
  return ret;
}
/**
 * Get buffer tail of size 'size'.
 *
 * Usually used to place external information (some parameters) into the
 * last 'size' bytes of the buffer's I/O area. The assert guarantees the
 * tail region does not overlap the current data window.
 *
 * @param zbbuf - buffer
 * @param size  - requested size
 * @return pointer to the buffer tail
 */
zb_void_t *zb_get_buf_tail(zb_buf_t *zbbuf, zb_uint8_t size)
{
  ZB_ASSERT((zbbuf) && ZB_BUF_LEN(zbbuf) + (zbbuf)->u.hdr.data_offset + (size) <= ZB_IO_BUF_SIZE);
  /* the tail is the final 'size' bytes of the raw buffer storage */
  return (void *)(zbbuf->buf + ZB_IO_BUF_SIZE - size);
}
/**
 * Drop 'size' bytes from the tail of the buffer's data region.
 *
 * @param zbbuf - buffer to trim
 * @param size  - number of bytes to remove from the end
 */
void zb_buf_cut_right(zb_buf_t *zbbuf, zb_uint8_t size)
{
  ZB_ASSERT(ZB_BUF_LEN(zbbuf) >= size);
  /* shrinking the length is enough: the data offset stays in place */
  zbbuf->u.hdr.len -= size;
}