Example #1
void HCI_Init(void)
{
  uint8_t index;
  
  /* Initialize list heads of ready and free hci data packet queues */
  list_init_head (&hciReadPktPool);
  list_init_head (&hciReadPktRxQueue);
  
  /* Initialize the queue of free hci data packets */
  for (index = 0; index < HCI_READ_PACKET_NUM_MAX; index++)
  {
    list_insert_tail(&hciReadPktPool, (tListNode *)&hciReadPacketBuffer[index]);
  }
}
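All of these examples use an intrusive, circular doubly-linked list, where an empty list is a head node that points to itself. Below is a minimal sketch of what list_init_head() and list_insert_tail() typically look like in such an implementation (an assumption for illustration; each project ships its own list.c, and the tListNode/pListNode names are taken from the BlueNRG examples on this page):

typedef struct _tListNode {
  struct _tListNode *next;
  struct _tListNode *prev;
} tListNode, *pListNode;

/* An empty list is a head whose next and prev both point at the head itself. */
void list_init_head(tListNode *listHead)
{
  listHead->next = listHead;
  listHead->prev = listHead;
}

/* Insert a node just before the head, i.e. at the tail of the circular list. */
void list_insert_tail(tListNode *listHead, tListNode *node)
{
  node->next = listHead;
  node->prev = listHead->prev;
  listHead->prev->next = node;
  listHead->prev = node;
}

This also suggests why HCI_Init() can cast entries of hciReadPacketBuffer to tListNode *: the packet struct presumably begins with the link fields, so a pointer to the packet is also a valid pointer to its list node.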
Example #2
static struct sxmpd_node *__init_instance(const char *name)
{
  struct sxmpd_node *nn = malloc(sizeof(struct sxmpd_node));
  char *nm = strdup(name);
  sxhub_t *sys = malloc(sizeof(sxhub_t));

  if(!nn) {
  __enomem:
    if(nn) free(nn);
    if(nm) free(nm);
    if(sys) free(sys);
    return NULL;
  } else memset(nn, 0, sizeof(struct sxmpd_node));

  if(!nm) goto __enomem;
  else nn->name = nm;

  if(!sys) goto __enomem;
  else {
    if(sxhub_init(sys)) goto __enomem;
    nn->sys = sys;
    sxhub_set_priv(nn->sys, nn); /* set instance */
  }

  /* init various data structures */
  usrtc_node_init(&nn->node, nn);
  list_init_head(&(nn->pem_filter));
  list_init_head(&(nn->account_filter));
  list_init_head(&(nn->rpc_filter));
  list_init_head(&(nn->ondestroy_filter));
  list_init_head(&(nn->onpulse_filter));
  /* rpc list */
  sxmp_rpclist_init(&(nn->rpclist));

  /* chacks init */
  usrtc_init(&nn->chacks, USRTC_SPLAY, 65535, __cmp_cstr);
  /* rpc add TODO: add results */
  sxmp_rpclist_add(&(nn->rpclist), 2, "PC002", NULL);
  sxmp_rpclist_add_function(&(nn->rpclist), 2, "channel-ack-get-stream",
                            __chack_get_stream);
  sxmp_rpclist_add_function(&(nn->rpclist), 2, "channel-ack-get-stream-list",
                            __chack_get_stream_list);

  return nn;
}
Example #3
/**
 * Initialize a new avl_tree struct
 * @param tree pointer to avl-tree
 * @param comp pointer to comparator for the tree
 * @param allow_dups true if the tree allows multiple
 *   elements with the same key
 * @param ptr custom parameter for comparator
 */
void
avl_init(struct avl_tree *tree, avl_tree_comp comp, bool allow_dups, void *ptr)
{
  list_init_head(&tree->list_head);
  tree->root = NULL;
  tree->count = 0;
  tree->comp = comp;
  tree->allow_dups = allow_dups;
  tree->cmp_ptr = ptr;
}
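A hedged usage sketch for this variant follows; the three-argument comparator signature is assumed from the cmp_ptr/ptr parameter and is not shown on this page, and my_cmp/my_tree/my_tree_setup are hypothetical names:

/* Hypothetical comparator: orders two int keys, ignoring the custom pointer. */
static int my_cmp(const void *k1, const void *k2, void *ptr)
{
  const int *a = k1;
  const int *b = k2;
  (void)ptr;
  return (*a > *b) - (*a < *b);
}

static struct avl_tree my_tree;

static void my_tree_setup(void)
{
  /* No duplicate keys allowed, no extra comparator context. */
  avl_init(&my_tree, my_cmp, false, NULL);
}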
Example #4
Queue new_queue(void) {
	Queue ret = malloc(sizeof(*ret));

	if (ret == NULL)
		return NULL;

	list_init_head(ret);

	return ret;
}
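A minimal caller sketch, assuming the enqueue()/dequeue()/queue_is_empty() helpers that appear in Example #9 below and a hypothetical handle_node() callback; since new_queue() allocates only the list head, a plain free() releases it once the queue has been drained:

void visit_breadth_first(struct tree_node *root)
{
  Queue queue = new_queue();
  if (queue == NULL)
    return;                           /* propagate the allocation failure */

  enqueue(queue, root);
  while (!queue_is_empty(queue))
    handle_node(dequeue(queue));      /* hypothetical per-node callback */

  free(queue);                        /* the head was a single malloc'd node */
}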
Example #5
/**
 * Initialize a new avl_tree struct
 * @param tree pointer to avl-tree
 * @param comp pointer to comparator for the tree
 * @param allow_dups true if the tree allows multiple
 *   elements with the same key
 */
void
avl_init(struct avl_tree *tree,
    int (*comp) (const void *k1, const void *k2),
    bool allow_dups)
{
  list_init_head(&tree->list_head);
  tree->root = NULL;
  tree->count = 0;
  tree->comp = comp;
  tree->allow_dups = allow_dups;
}
Example #6
File: irq.c Project: tdz/opsys
void
init_irq_handling(int (*enable_irq)(unsigned char),
                  void (*disable_irq)(unsigned char))
{
    struct list* head = g_irq_handling.irqh;
    const struct list* head_end = head + ARRAY_NELEMS(g_irq_handling.irqh);

    for (; head < head_end; ++head) {
        list_init_head(head);
    }

    g_irq_handling.enable_irq = enable_irq;
    g_irq_handling.disable_irq = disable_irq;
}
Example #7
static void kmem_cache_ctor(struct kmem_cache *cache, const char *name,
                            size_t size, size_t alignment)
{
    strncpy(cache->name, name, sizeof(cache->name) / sizeof(*cache->name) - 1);
    cache->size = size;
    if (alignment < sizeof(void *))
    {
        alignment = sizeof(void *);
    }
    cache->alignment = alignment;
    cache->object_size = ALIGN(size, alignment);
    cache->pages_per_slab = get_pages_per_slab(cache->object_size);
    cache->num_objects =
        cache->pages_per_slab * PAGE_SIZE / cache->object_size;
    list_init_head(&cache->partial_slabs);
}
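As a rough worked example of the size math above (assuming ALIGN() rounds up to the next multiple of the alignment and a 4 KiB PAGE_SIZE; the project's actual macros and get_pages_per_slab() result may differ), a request for 20-byte objects with pointer alignment ends up with 24-byte slots:

#include <stdio.h>

/* Assumed semantics of the project's ALIGN() macro: round x up to a multiple of a. */
#define ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
  size_t size = 20;                            /* requested object size             */
  size_t alignment = 8;                        /* bumped up to sizeof(void *)       */
  size_t object_size = ALIGN(size, alignment); /* 24                                */
  size_t page_size = 4096;                     /* assumed PAGE_SIZE                 */
  size_t pages_per_slab = 1;                   /* pretend get_pages_per_slab() == 1 */

  printf("object_size = %zu, objects per slab = %zu\n",
         object_size, pages_per_slab * page_size / object_size);  /* 24 and 170 */
  return 0;
}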
Example #8
File: sched.c Project: tdz/opsys
/**
 * \brief init scheduler
 * \param[in] idle the initial idle thread
 * \return 0 on success, or a negative error code otherwise
 *
 * This initializes the scheduler. The passed thread is the idle
 * thread. It is added to the thread list automatically.
 */
int
sched_init(struct tcb* idle)
{
    assert(idle);

    for (size_t i = 0; i < ARRAY_NELEMS(g_current_thread); ++i) {
        g_current_thread[i] = idle;
    }

    for (size_t i = 0; i < ARRAY_NELEMS(g_thread); ++i) {
        list_init_head(g_thread + i);
    }

    alarm_init(&g_alarm, alarm_handler);

    int res = timer_add_alarm(&g_alarm, sched_timeout());
    if (res < 0) {
        goto err_timer_add_alarm;
    }

    res = sched_add_thread(idle, 0);
    if (res < 0) {
        goto err_sched_add_thread;
    }

    return 0;

err_sched_add_thread:
    timer_remove_alarm(&g_alarm);
err_timer_add_alarm:
    for (size_t i = ARRAY_NELEMS(g_current_thread); i;) {
        --i;
        g_current_thread[i] = NULL;
    }
    return res;
}
Example #9
size_t make_lists(struct list_node out[], struct tree_node *root) {

	if (root == NULL) {
		return 0;
	}

	Queue queue = new_queue();
	if (queue == NULL) {
		return 0;
	}

	unsigned curr_level = 1;
	unsigned next_level = 0;
	size_t cursor = 0;
	enqueue(queue, root);

	while (!queue_is_empty(queue)) {
		list_init_head(&out[cursor]);

		size_t i;
		for (i = 0; i < curr_level; i++) {
			struct tree_node *node = dequeue(queue);
			next_level += enqueue_all_children(queue, node);

			struct list_node *list_el = &list_nodes[next_node++];
			list_set_data(list_el, node);
			list_append(&out[cursor], list_el);
		}

		cursor++;
		curr_level = next_level;
		next_level = 0;
	}

	return cursor;
}
Example #10
int hci_send_req(struct hci_request *r, BOOL async)
{
  uint8_t *ptr;
  uint16_t opcode = htobs(cmd_opcode_pack(r->ogf, r->ocf));
  hci_event_pckt *event_pckt;
  hci_uart_pckt *hci_hdr;
  int to = DEFAULT_TIMEOUT;
  struct timer t;
  tHciDataPacket * hciReadPacket = NULL;
  tListNode hciTempQueue;
  
  list_init_head(&hciTempQueue);
  
  hci_send_cmd(r->ogf, r->ocf, r->clen, r->cparam);
  
  if(async){
    goto done;
  }
  
  /* Minimum timeout is 1. */
  if(to == 0)
    to = 1;
  
  Timer_Set(&t, to);
  
  while(1) {
    evt_cmd_complete *cc;
    evt_cmd_status *cs;
    evt_le_meta_event *me;
    int len;
      
#if ENABLE_MICRO_SLEEP    
    while(1){
      ATOMIC_SECTION_BEGIN();
      if(Timer_Expired(&t)){
        ATOMIC_SECTION_END();
        goto failed;
      }
      if(!HCI_Queue_Empty()){
        ATOMIC_SECTION_END();
        break;
      }
      Enter_Sleep_Mode();
      ATOMIC_SECTION_END();
    }
#else
    while(1){
      if(Timer_Expired(&t)){
        goto failed;
      }
      if(!HCI_Queue_Empty()){
        break;
      }
    }
#endif
    
    /* Extract packet from HCI event queue. */
    Disable_SPI_IRQ();
    list_remove_head(&hciReadPktRxQueue, (tListNode **)&hciReadPacket);    
    
    hci_hdr = (void *)hciReadPacket->dataBuff;
    if(hci_hdr->type != HCI_EVENT_PKT){
      list_insert_tail(&hciTempQueue, (tListNode *)hciReadPacket); // See comment below
      Enable_SPI_IRQ();
      continue;
    }
    
    event_pckt = (void *) (hci_hdr->data);
    
    ptr = hciReadPacket->dataBuff + (1 + HCI_EVENT_HDR_SIZE);
    len = hciReadPacket->data_len - (1 + HCI_EVENT_HDR_SIZE);
    
    switch (event_pckt->evt) {
      
    case EVT_CMD_STATUS:
      cs = (void *) ptr;
      
      if (cs->opcode != opcode)
        goto failed;
      
      if (r->event != EVT_CMD_STATUS) {
        if (cs->status) {
          goto failed;
        }
        break;
      }
      
      r->rlen = MIN(len, r->rlen);
      Osal_MemCpy(r->rparam, ptr, r->rlen);
      goto done;
      
    case EVT_CMD_COMPLETE:
      cc = (void *) ptr;
      
      if (cc->opcode != opcode)
        goto failed;
      
      ptr += EVT_CMD_COMPLETE_SIZE;
      len -= EVT_CMD_COMPLETE_SIZE;
      
      r->rlen = MIN(len, r->rlen);
      Osal_MemCpy(r->rparam, ptr, r->rlen);
      goto done;
      
    case EVT_LE_META_EVENT:
      me = (void *) ptr;
      
      if (me->subevent != r->event)
        break;
      
      len -= 1;
      r->rlen = MIN(len, r->rlen);
      Osal_MemCpy(r->rparam, me->data, r->rlen);
      goto done;
      
    case EVT_HARDWARE_ERROR:            
      goto failed;
      
    default:      
      break;
    }
    
    /* In the meantime there could be other events from the controller.
    In this case, insert the packet in a different queue. These packets will be
    inserted back in the main queue just before exiting from send_req().
    */
    list_insert_tail(&hciTempQueue, (tListNode *)hciReadPacket);
    /* Be sure there is at least one packet in the pool to process the expected event. */
    if(list_is_empty(&hciReadPktPool)){
      pListNode tmp_node;      
      list_remove_head(&hciReadPktRxQueue, &tmp_node);
      list_insert_tail(&hciReadPktPool, tmp_node);      
    }
    
    Enable_SPI_IRQ();
    
  }
  
failed: 
  move_list(&hciReadPktRxQueue, &hciTempQueue);  
  Enable_SPI_IRQ();
  return -1;
  
done:
  // Insert the packet back into the pool.
  list_insert_head(&hciReadPktPool, (tListNode *)hciReadPacket); 
  move_list(&hciReadPktRxQueue, &hciTempQueue);
  
  Enable_SPI_IRQ();
  return 0;
}
Example #11
/**
 * Initialize packet socket handler
 * @return always returns 0
 */
static int
_init(void) {
  list_init_head(&_packet_sockets);
  return 0;
}
Example #12
/* TODO DK: check automatically if all merges were done properly! */
static void tc_alloc_dealloc(void *ctx)
{
  test_framework_t *tf = ctx;  
  list_node_t *iter;
  list_head_t head;
  page_idx_t allocated, saved_ap, resr = 0;
  page_frame_t *pages;
  page_idx_t c;

  tf->printf("Target MM pool: %s\n", tlsf_ctx.pool->name);
  tf->printf("Number of allocatable pages: %d\n", tlsf_ctx.pool->free_pages);
  saved_ap = atomic_get(&tlsf_ctx.pool->free_pages);

#ifdef CONFIG_SMP
  for_each_cpu(c) {
    if (!tlsf_ctx.tlsf->percpu[c] || !c)
      continue;

    resr += tlsf_ctx.tlsf->percpu[c]->noc_pages;
  }
#endif /* CONFIG_SMP */

  tf->printf("Allocate all possible pages one-by-one...\n");
  list_init_head(&head);
  allocated = 0;
  for (;;) {
    pages = alloc_page(AF_ZERO);
    tlsf_validate_dbg(tlsf_ctx.tlsf);
    if (!pages)
      break;

    list_add2tail(&head, &pages->chain_node);
    allocated++;    
  }
  if (atomic_get(&tlsf_ctx.pool->free_pages) != resr) {
    tf->printf("Failed to allocate %d pages. %d pages rest\n",
               saved_ap, atomic_get(&tlsf_ctx.pool->free_pages));
    tf->failed();
  }
  if (allocated != saved_ap) {
    tf->printf("Not all pages was allocated from TLSF.\n");
    tf->printf("Total: %d. Allocated: %d\n", saved_ap, allocated);
  }
  
  mmpool_allocator_dump(tlsf_ctx.pool);
  tf->printf("Free allocated %d pages.\n", allocated);
  pages = list_entry(list_node_first(&head), page_frame_t, chain_node);
  list_cut_head(&head);
  free_pages_chain(pages);
  if (atomic_get(&tlsf_ctx.pool->free_pages) != saved_ap) {
    tf->printf("Not all pages were fried: %d rest (%d total)\n",
               saved_ap - atomic_get(&tlsf_ctx.pool->free_pages), saved_ap);
    tf->failed();
  }

  mmpool_allocator_dump(tlsf_ctx.pool);
  tf->printf("Allocate all possible pages usign non-continous allocation\n");
  pages = alloc_pages(saved_ap - resr, AF_ZERO | AF_USER);
  if (!pages) {
    tf->printf("Failed to allocate non-continous %d pages!\n", saved_ap);
    tf->failed();
  }

  tlsf_validate_dbg(tlsf_ctx.tlsf);
  mmpool_allocator_dump(tlsf_ctx.pool);
  allocated = 0;
  list_set_head(&head, &pages->chain_node);
  list_for_each(&head, iter)
    allocated++;
  if (allocated != (saved_ap - resr)) {
    tf->printf("Invalid number of pages allocated: %d (%d was expected)\n", allocated, saved_ap - resr);
    tf->failed();
  }

  list_cut_head(&head);
  free_pages_chain(pages);
  if (atomic_get(&tlsf_ctx.pool->free_pages) != saved_ap) {
    tf->printf("Not all pages were fried: %d rest (%d total)\n",
               saved_ap - atomic_get(&tlsf_ctx.pool->free_pages), saved_ap);
    tf->failed();
  }

  mmpool_allocator_dump(tlsf_ctx.pool);
  tlsf_ctx.completed = true;
  sys_exit(0);
}
Example #13
int hci_send_req(struct hci_request *r, BOOL async)
{
  uint8_t *ptr;
  uint16_t opcode = htobs(cmd_opcode_pack(r->ogf, r->ocf));
  hci_event_pckt *event_pckt;
  hci_uart_pckt *hci_hdr;
  int to = /*1;*/ DEFAULT_TIMEOUT;
  struct timer t;
  tHciDataPacket * hciReadPacket = NULL;
  tListNode hciTempQueue;
  
  list_init_head((tListNode*)&hciTempQueue);

  // cannot be processed, due to reentrancy
  if (hciAwaitReply) {
    return -1;
  }

  hciAwaitReply = TRUE;
  
  hci_send_cmd(r->ogf, r->ocf, r->clen, r->cparam);
  
  if(async){
    goto done;
  }
  
  /* Minimum timeout is 1. */
  if(to == 0)
    to = 1;
  
  Timer_Set(&t, to);
  
  while(1) {
    evt_cmd_complete *cc;
    evt_cmd_status *cs;
    evt_le_meta_event *me;
    int len;

    // we're done with the sending, wait for a reply from the bluenrg
    io_seproxyhal_general_status();

    // perform io_event based loop to wait for BLUENRG_RECV_EVENT
    for (;;) {
      io_seproxyhal_spi_recv(G_io_seproxyhal_spi_buffer, sizeof(G_io_seproxyhal_spi_buffer), 0);
      // check if event is a ticker event
      unsigned int ticker_event = G_io_seproxyhal_spi_buffer[0] == SEPROXYHAL_TAG_TICKER_EVENT;

      // process IOs, and BLE fetch, ble queue is updated through common code
      io_seproxyhal_handle_event();

      // don't ack the BLUENRG_RECV_EVENT, as acking it would require sending another command in reply.
      if(!list_is_empty((tListNode*)&hciReadPktRxQueue)){
        /* Extract packet from HCI event queue. */
        //Disable_SPI_IRQ();
        list_remove_head((tListNode*)&hciReadPktRxQueue, (tListNode **)&hciReadPacket);    
        list_insert_tail((tListNode*)&hciTempQueue, (tListNode *)hciReadPacket);
        
        hci_hdr = (void *)hciReadPacket->dataBuff;
        if(hci_hdr->type != HCI_EVENT_PKT){
          move_list((tListNode*)&hciReadPktPool, (tListNode*)&hciTempQueue);  
          //list_insert_tail((tListNode*)&hciTempQueue, (tListNode *)hciReadPacket); // See comment below
          //Enable_SPI_IRQ();
          goto case_USER_PROCESS;
        }
        
        event_pckt = (void *) (hci_hdr->data);
        
        ptr = hciReadPacket->dataBuff + (1 + HCI_EVENT_HDR_SIZE);
        len = hciReadPacket->data_len - (1 + HCI_EVENT_HDR_SIZE);
        
        /* In the meantime there could be other events from the controller.
        In this case, insert the packet in a different queue. These packets will be
        inserted back in the main queue just before exiting from send_req().
        */

        event_pckt = (void *) (hci_hdr->data);
        switch (event_pckt->evt) {
          
        case EVT_CMD_STATUS:
          cs = (void *) ptr;
          
          if (cs->opcode != opcode) {
            goto case_USER_PROCESS;
          }
          
          if (r->event != EVT_CMD_STATUS) {
            goto case_USER_PROCESS;
          }
          
          r->rlen = MIN(len, r->rlen);
          Osal_MemCpy(r->rparam, ptr, r->rlen);
          goto done;
          
        case EVT_CMD_COMPLETE:
          cc = (void *) ptr;
          
          if (cc->opcode != opcode) {
            goto case_USER_PROCESS;
          }
          
          ptr += EVT_CMD_COMPLETE_SIZE;
          len -= EVT_CMD_COMPLETE_SIZE;
          
          r->rlen = MIN(len, r->rlen);
          Osal_MemCpy(r->rparam, ptr, r->rlen);
          goto done;
          
        case EVT_LE_META_EVENT:
          me = (void *) ptr;
          
          if (me->subevent != r->event) {
            goto case_USER_PROCESS;
          }
          
          len -= 1;
          r->rlen = MIN(len, r->rlen);
          Osal_MemCpy(r->rparam, me->data, r->rlen);
          goto done;
          
        case EVT_HARDWARE_ERROR:
          return -1;

        default:      
        case_USER_PROCESS:
          HCI_Event_CB(hciReadPacket->dataBuff);
          break;
        }
      }

      // timeout
      if (ticker_event) {
        if (to) {
          to--;
        }
        // don't signal timeout if the event has been closed by handle event to avoid sending commands after a status has been issued
        else if (!io_seproxyhal_spi_is_status_sent()) {
          return -1;
        }
      }

      // ack the received event we have processed
      io_seproxyhal_general_status();
    }
    
    
    //Enable_SPI_IRQ();
    
  }
  
failed: 
  move_list((tListNode*)&hciReadPktPool, (tListNode*)&hciTempQueue);  
  hciAwaitReply = FALSE;
  //Enable_SPI_IRQ();
  return -1;
  
done:
  // Insert the packet back into the pool.
  /*
  if (hciReadPacket) {
    list_insert_head((tListNode*)&hciReadPktPool, (tListNode *)hciReadPacket); 
  }
  */
  move_list((tListNode*)&hciReadPktPool, (tListNode*)&hciTempQueue);
  hciAwaitReply = FALSE;
  //Enable_SPI_IRQ();
  return 0;
}