/* Send one LE credit-based-flow-control segment on the channel.
 * Blocks until a TX credit is available, carves a segment off @buf
 * (prepending @sdu_hdr_len bytes of SDU header space), and hands it to
 * bt_l2cap_send().  Returns the number of bytes queued, -ENOMEM if the
 * segment could not be created, or -ECONNRESET if the channel lost its
 * connection while blocked on credits.
 */
static int l2cap_chan_le_send(struct bt_l2cap_le_chan *ch, struct net_buf *buf,
			      uint16_t sdu_hdr_len)
{
	int sent;

	/* Block until the peer has granted us a flow-control credit. */
	nano_sem_take(&ch->tx.credits, TICKS_UNLIMITED);

	buf = l2cap_chan_create_seg(ch, buf, sdu_hdr_len);
	if (!buf) {
		return -ENOMEM;
	}

	/* The connection may have dropped while we were blocked above. */
	if (!ch->chan.conn) {
		net_buf_unref(buf);
		return -ECONNRESET;
	}

	BT_DBG("ch %p cid 0x%04x len %u credits %u", ch, ch->tx.cid,
	       buf->len, ch->tx.credits.nsig);

	/* bt_l2cap_send() consumes the buffer; capture the length first. */
	sent = buf->len;

	bt_l2cap_send(ch->chan.conn, ch->tx.cid, buf);

	return sent;
}
/*
 * Release a network context back to the free pool.
 *
 * Unregisters any UDP receive callback, stops TCP listening (server
 * contexts only, when TCP is compiled in) and wipes the context's
 * tuple/UDP state, all under contexts_lock.  Passing NULL is a no-op.
 */
void net_context_put(struct net_context *context)
{
	if (!context) {
		return;
	}

	nano_sem_take(&contexts_lock, TICKS_UNLIMITED);

	/* UDP: drop the receive callback registered for this context. */
	if (context->tuple.ip_proto == IPPROTO_UDP) {
		if (net_context_get_receiver_registered(context)) {
			struct simple_udp_connection *udp =
				net_context_get_udp_connection(context);
			simple_udp_unregister(udp);
		}
	}

#ifdef CONFIG_NETWORKING_WITH_TCP
	/* Server contexts hold a listening port; release it. */
	if (context->tcp_type == NET_TCP_TYPE_SERVER) {
		tcp_unlisten(UIP_HTONS(context->tuple.local_port),
			     &context->tcp);
	}
#endif

	/* Zeroing the tuple marks the slot free (ip_proto == 0 is the
	 * "unused" test used by the allocator). */
	memset(&context->tuple, 0, sizeof(context->tuple));
	memset(&context->udp, 0, sizeof(context->udp));
	context->receiver_registered = false;

	context_sem_give(&contexts_lock);
}
/**
 * Main function of the timer task.  This function is in charge of
 * calling the callbacks associated with the timers.
 *
 * This function is run in a high priority task on microkernel builds.
 * This function is run in a high priority fiber on nanokernel builds.
 * It implements an infinite loop that waits for any of the semaphores from
 * g_TimerSem.
 * When a semaphore is signaled, this function fetches the pointer to the
 * associated callback in g_TimerDesc, then calls it.
 *
 * @param dummy1 not used (required by ZEPHYR API)
 * @param dummy2 not used (required by ZEPHYR API)
 */
void timer_task(int dummy1, int dummy2)
{
	/* NOTE(review): UINT32_MAX assigned to an int32_t wraps to -1;
	 * presumably a negative tick count means "wait forever" to
	 * task_sem_take/nano_sem_take — confirm the kernel convention. */
	int32_t timeout = UINT32_MAX;
	uint32_t now;

	UNUSED(dummy1);
	UNUSED(dummy2);

	while (1) { /* the Timer task shall never stop */
		/* Before waiting for next timeout, publish it to warn the
		 * QUARK cpu. */
		publish_cpu_timeout(timeout);

		/* Block until g_TimerSem is signaled (timer list changed)
		 * or until the next timeout expires. */
#ifdef CONFIG_MICROKERNEL
		(void)task_sem_take(g_TimerSem, CONVERT_MS_TO_TICKS(timeout));
#else
		nano_sem_take(&g_TimerSem, CONVERT_MS_TO_TICKS(timeout));
#endif
		now = get_uptime_ms();

		/* Task is unblocked: fire every timer at the head of the
		 * list whose expiration date has passed, refreshing `now`
		 * after each callback since callbacks take time. */
		while (g_CurrentTimerHead &&
		       is_after_expiration(now, &(g_CurrentTimerHead->desc))) {
			execute_callback(g_CurrentTimerHead);
			now = get_uptime_ms();
		}

		/* Compute timeout until the expiration of the next timer. */
		if (g_CurrentTimerHead != NULL) {
			/* In micro kernel context, timeout = 0 or
			 * timeout < 0 works.  In nano kernel context
			 * timeout must be a positive value. */
			timeout = g_CurrentTimerHead->desc.expiration - now;
			if (timeout < 0)
				panic(E_OS_ERR_OVERFLOW);
		} else {
			/* No pending timer: sleep until signaled. */
			timeout = UINT32_MAX;
		}

#ifdef __DEBUG_OS_ABSTRACTION_TIMER
		/* NOTE(review): %u with an int32_t argument — works on the
		 * targeted ABIs but is formally mismatched; confirm. */
		if (NULL != g_CurrentTimerHead)
			_log(
			    "\nINFO : timer_task : now = %u, next timer expires at %u, timeout = %u",
			    get_uptime_ms(),
			    g_CurrentTimerHead->desc.expiration, timeout);
		else
			_log(
			    "\nINFO : timer_task : now = %u, no next timer, timeout = OS_WAIT_FOREVER",
			    get_uptime_ms());
#endif
	} /* end while(1) */
}
/* Wait briefly for an 802.15.4 ACK after a transmission.
 * Broadcast frames and frames that did not request acknowledgement
 * succeed immediately; otherwise wait up to 10 ms on ack_lock and
 * report MAC_TX_OK or MAC_TX_NOACK based on the ack_received flag set
 * by the RX path.
 */
static inline uint8_t wait_for_ack(bool broadcast, bool ack_required)
{
	/* No ACK expected for broadcasts or unacknowledged frames. */
	if (broadcast || !ack_required) {
		return MAC_TX_OK;
	}

	/* Presumably a 0 return means the 10 ms wait elapsed without a
	 * signal, so the semaphore is re-initialized for the next frame
	 * — confirm nano_sem_take's return convention. */
	if (nano_sem_take(&ack_lock, MSEC(10)) == 0) {
		nano_sem_init(&ack_lock);
	}

	return ack_received ? MAC_TX_OK : MAC_TX_NOACK;
}
/* Read-modify-write the bits selected by @mask in register @reg_addr.
 * Bus access is serialized with the device semaphore.  Returns 0 on
 * success or -EIO on an I2C failure.
 */
int bmg160_update_byte(struct device *dev, uint8_t reg_addr, uint8_t mask,
		       uint8_t value)
{
	struct bmg160_device_config *dev_cfg = dev->config->config_info;
	struct bmg160_device_data *bmg160 = dev->driver_data;
	int status;

	bmg160_bus_config(dev);

	/* One bus transaction at a time. */
	nano_sem_take(&bmg160->sem, TICKS_UNLIMITED);

	status = i2c_reg_update_byte(bmg160->i2c, dev_cfg->i2c_addr,
				     reg_addr, mask, value) < 0 ? -EIO : 0;

	nano_sem_give(&bmg160->sem);

	return status;
}
/* Burst-write @len bytes from @data starting at register @reg_addr.
 * Bus access is serialized with the device semaphore.  Returns 0 on
 * success or -EIO on an I2C failure.
 */
static int bmg160_write(struct device *dev, uint8_t reg_addr, uint8_t *data,
			uint8_t len)
{
	struct bmg160_device_config *dev_cfg = dev->config->config_info;
	struct bmg160_device_data *bmg160 = dev->driver_data;
	int status;

	bmg160_bus_config(dev);

	/* One bus transaction at a time. */
	nano_sem_take(&bmg160->sem, TICKS_UNLIMITED);

	status = i2c_burst_write(bmg160->i2c, dev_cfg->i2c_addr,
				 reg_addr, data, len) < 0 ? -EIO : 0;

	nano_sem_give(&bmg160->sem);

	return status;
}
/*
 * Allocate a network context for the given protocol/address tuple.
 *
 * The caller must provide writable storage for @local_addr; an ANY
 * address is replaced in place with one of our configured addresses.
 * A local_port of 0 picks a random unused port >= 0x8000.  Returns a
 * pointer into the static contexts[] table, or NULL if the local
 * address could not be resolved, the requested port is in use, or the
 * table is full.
 */
struct net_context *net_context_get(enum ip_protocol ip_proto,
				    const struct net_addr *remote_addr,
				    uint16_t remote_port,
				    struct net_addr *local_addr,
				    uint16_t local_port)
{
#ifdef CONFIG_NETWORKING_WITH_IPV6
	const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
	const uip_ds6_addr_t *uip_addr;
	uip_ipaddr_t ipaddr;
#endif
	int i;
	struct net_context *context = NULL;

	/* User must provide storage for the local address. */
	if (!local_addr) {
		return NULL;
	}

#ifdef CONFIG_NETWORKING_WITH_IPV6
	/* ANY address: substitute a global (preferred) or link-local
	 * address of ours. */
	if (memcmp(&local_addr->in6_addr, &in6addr_any,
		   sizeof(in6addr_any)) == 0) {
		uip_addr = uip_ds6_get_global(-1);
		if (!uip_addr) {
			uip_addr = uip_ds6_get_link_local(-1);
		}
		if (!uip_addr) {
			return NULL;
		}
		memcpy(&local_addr->in6_addr, &uip_addr->ipaddr,
		       sizeof(struct in6_addr));
	}
#else
	if (local_addr->in_addr.s_addr == INADDR_ANY) {
		uip_gethostaddr((uip_ipaddr_t *)&local_addr->in_addr);
	}
#endif

	nano_sem_take(&contexts_lock, TICKS_UNLIMITED);

	if (local_port) {
		if (context_port_used(ip_proto, local_port, local_addr) < 0) {
			/* BUGFIX: the original returned here while still
			 * holding contexts_lock, deadlocking every
			 * subsequent caller.  Release it first. */
			context_sem_give(&contexts_lock);
			return NULL;
		}
	} else {
		/* Pick a random unused ephemeral port (high bit set). */
		do {
			local_port = random_rand() | 0x8000;
		} while (context_port_used(ip_proto, local_port,
					   local_addr) == -EEXIST);
	}

	/* ip_proto == 0 marks a free slot (cleared by net_context_put). */
	for (i = 0; i < NET_MAX_CONTEXT; i++) {
		if (!contexts[i].tuple.ip_proto) {
			contexts[i].tuple.ip_proto = ip_proto;
			contexts[i].tuple.remote_addr =
				(struct net_addr *)remote_addr;
			contexts[i].tuple.remote_port = remote_port;
			contexts[i].tuple.local_addr =
				(struct net_addr *)local_addr;
			contexts[i].tuple.local_port = local_port;
			context = &contexts[i];
			break;
		}
	}

	context_sem_give(&contexts_lock);

#ifdef CONFIG_NETWORKING_WITH_IPV6
	/* Register our local address with uIP — only when a context was
	 * actually allocated (the original did this even when the table
	 * was full). */
	if (context) {
		memcpy(&ipaddr.u8, local_addr->in6_addr.s6_addr,
		       sizeof(ipaddr.u8));
		if (uip_is_addr_mcast(&ipaddr)) {
			uip_ds6_maddr_add(&ipaddr);
		} else {
			uip_ds6_addr_add(&ipaddr, 0, ADDR_MANUAL);
		}
	}
#endif

	return context;
}
/* Acquire exclusive access to the ADC and mark it busy.  Blocks until
 * the semaphore is available; presumably released by a matching
 * unlock/give path elsewhere in the driver — confirm. */
static void adc_lock(struct adc_info *data)
{
	nano_sem_take(&data->sem, TICKS_UNLIMITED);
	data->state = ADC_STATE_BUSY;
}
/* Block until the global mainloop semaphore is acquired; the matching
 * unlock is expected to give _sol_mainloop_lock back. */
void sol_mainloop_impl_lock(void)
{
	nano_sem_take(&_sol_mainloop_lock, TICKS_UNLIMITED);
}