/* Message processing functions */

/*
 * purt_sd_buffer_process_message() - dispatch the message at the head of
 * the queue (queue[0]) to its handler.
 *
 * NOTE(review): the position of atomic_pop() relative to the handler call
 * differs per message type (SUBSTATE pops after onSubstate(), the others pop
 * first). This ordering looks deliberate (the handler presumably reads the
 * payload before removal) -- confirm before reordering.
 */
void purt_sd_buffer_process_message(void)
{
    switch(queue[0]) {
    case POLL:
        atomic_pop();
        onPoll();
        break;
    case SUBSTATE:
        /* pop only after the handler has consumed the substate payload */
        onSubstate(&substate);
        atomic_pop();
        break;
    case HANDSHAKE:
        atomic_pop();
        /*
         * Handshake reply, one byte at a time: length (4), 0xFF (presumably
         * a sync/marker byte -- TODO confirm), application type, unit ID,
         * then ~4 (bitwise complement of the length, presumably a checksum
         * -- TODO confirm against the protocol spec).
         */
        PURT_UART_BLOCK_SET_BYTE(4);
        PURT_UART_BLOCK_SET_BYTE(0xFF);
        PURT_UART_BLOCK_SET_BYTE(APPLICATION_TYPE);
        PURT_UART_BLOCK_SET_BYTE(UID);
        PURT_UART_BLOCK_SET_BYTE(~4);
        onHandshake();
        break;
    case NONE:
        /* no pending message: nothing to do */
        break;
    }
}
// tx_cmds() - send all commands out to the network. void tx_cmds() { // local variable instantiation packet tx_packet; volatile uint8_t local_tx_cmd_queue_size; volatile uint8_t tx_length = 0; volatile int8_t val = 0; // atomically get the queue size local_tx_cmd_queue_size = atomic_size(&g_cmd_tx_queue, g_cmd_tx_queue_mux); // print out task header if((TRUE == g_verbose) && (0 < local_tx_cmd_queue_size)) { nrk_kprintf(PSTR("tx_cmds...\r\n")); } // loop on queue size received above, and no more. for(uint8_t i = 0; i < local_tx_cmd_queue_size; i++) { nrk_led_set(ORANGE_LED); // get a packet out of the queue. atomic_pop(&g_cmd_tx_queue, &tx_packet, g_cmd_tx_queue_mux); // assemble the packet and senx tx_length = assemble_packet((uint8_t *)&g_net_tx_buf, &tx_packet); val = bmac_tx_pkt(g_net_tx_buf, tx_length); if(NRK_OK != val){ nrk_kprintf( PSTR( "NO ack or Reserve Violated!\r\n" )); } nrk_led_clr(ORANGE_LED); } return; }
// tx_data_task() - send standard messages out to the network (i.e. handshake messages, etc.) void tx_data() { // local variable initialization packet tx_packet; volatile int8_t val = 0; volatile uint8_t sent_heart = FALSE; volatile uint8_t to_send; volatile uint8_t tx_length = 0; volatile uint8_t local_tx_data_queue_size; volatile msg_type tx_type; // atomically get the queue size local_tx_data_queue_size = atomic_size(&g_data_tx_queue, g_data_tx_queue_mux); // print out task header if((TRUE == g_verbose) && (0 < local_tx_data_queue_size)){ nrk_kprintf(PSTR("tx_data...\r\n")); } // loop on queue size received above, and no more. for(uint8_t i = 0; i < local_tx_data_queue_size; i++) { nrk_led_set(ORANGE_LED); // get a packet out of the queue. atomic_pop(&g_data_tx_queue, &tx_packet, g_data_tx_queue_mux); // get packet parameters tx_type = tx_packet.type; // only hop one heartbeat per iteration. if(((MSG_HEARTBEAT == tx_type) || (MSG_RESET == tx_type)) && (TRUE == sent_heart)) { to_send = FALSE; } else { to_send = TRUE; } if (TRUE == to_send) { // assembe and send packet tx_length = assemble_packet((uint8_t *)&g_net_tx_buf, &tx_packet); val = bmac_tx_pkt(g_net_tx_buf, tx_length); if(NRK_OK != val){ nrk_kprintf( PSTR( "NO ack or Reserve Violated!\r\n" )); } // set flag if(MSG_HEARTBEAT == tx_type){ sent_heart = TRUE; } } nrk_led_clr(ORANGE_LED); } return; }
static void ion_clean_and_init_allocated_pages( struct ion_system_heap *heap, struct scatterlist *sgl, int nents, bool memory_zero) { int i; struct scatterlist *sg; size_t sum = 0; int page_idx; unsigned long vaddr; pte_t *ptep; down(&heap->vm_sem); page_idx = atomic_pop(&heap->page_idx, VM_PAGE_COUNT_WIDTH); BUG_ON((page_idx < 0) || (page_idx >= VM_PAGE_COUNT)); ptep = heap->pte[page_idx * (SZ_1M / PAGE_SIZE)]; vaddr = (unsigned long)heap->reserved_vm_area->addr + (SZ_1M * page_idx); for_each_sg(sgl, sg, nents, i) { int j; if (!PageHighMem(sg_page(sg))) { memset(page_address(sg_page(sg)), 0, sg_dma_len(sg)); continue; } for (j = 0; j < (sg_dma_len(sg) / PAGE_SIZE); j++) { set_pte_at(&init_mm, vaddr, ptep, mk_pte(sg_page(sg) + j, PAGE_KERNEL)); ptep++; vaddr += PAGE_SIZE; } sum += j * PAGE_SIZE; if (sum == SZ_1M) { ptep = heap->pte[page_idx * (SZ_1M / PAGE_SIZE)]; vaddr = (unsigned long)heap->reserved_vm_area->addr + (SZ_1M * page_idx); ion_clean_and_unmap(vaddr, ptep, sum, memory_zero); sum = 0; } }
/*
 * hzp_free - mark this thread's hazard-pointer TLS record unused and
 * opportunistically unlink one unused record from the global thread list.
 *
 * Best effort: if the free-lock cannot be taken, the record stays on the
 * list with flags.used == false (presumably reclaimed by a later caller --
 * TODO confirm).
 */
void hzp_free(void)
{
    atomicst_t *whead;

    /* mark our own TLS record as no longer in use */
    local_hzp.flags.used = false;

    /* nonzero from mutex_lock is treated as "could not lock"; bail out */
    if(unlikely(mutex_lock(&hzp_free_lock)))
        return;

    /* traverse the list of threads */
    whead = deatomic(&hzp_threads.head);
    while(atomic_sread(whead))
    {
        struct hzp *entry =
            container_of(deatomic(atomic_sread(whead)), struct hzp, lst);
        if(entry->flags.used)
        {
            /* entry still live -- keep traversing: step to the next node */
            whead = deatomic(atomic_sread(whead));
        }
        else
        {
            /* found a dead entry: unlink it and stop (one per call) */
            atomic_pop(whead);
            break;
        }
    }
    mutex_unlock(&hzp_free_lock);
}