/*
 * Finish a transmit: wait for the slave to drop BUSY, then deselect it
 * and record the outcome in dev->state and the stats counters.
 */
static void spi_tx_end(void)
{
	struct spi_dev *dev = &spi_dev;
	u16 limit;
	u8 status = 0;

	/*
	 * Wait for not busy. This also serves as padding.
	 */
	limit = ASPI_LEN_MAX + 1;
	do {
		status = spi_platform_io(0);
		if (!(status & ASPI_MSTAT_BUSY)) {
			break;
		}
	} while (limit-- > 0);
	spi_platform_slave_deselect();
	if (status & ASPI_MSTAT_BUSY) {
		/* Slave never came ready: re-select and retry later. */
		spi_platform_slave_select();
		STATS(tx_hung);
		return;
	}
	if (status & ASPI_MSTAT_ERR) {
		/* Slave flagged an error: go back to waiting to transmit. */
		spi_platform_slave_select();
		STATS(tx_err);
		dev->state = SS_TX_WAIT;
		return;
	}
	/* Transmit completed cleanly; the buffer can be reused. */
	STATS(tx_ok);
	dev->state = SS_IDLE;
	dev->tx_ready = 0;
	return;
}
void AT_collect() { clock_t start, mark, sweep; clock_t user; FILE *file = gc_f; int size; /* snapshop*/ for(size=MIN_TERM_SIZE; size<MAX_TERM_SIZE; size++) { nb_live_blocks_before_last_gc[size] = at_nrblocks[size]; nb_reclaimed_blocks_during_last_gc[size]=0; nb_reclaimed_cells_during_last_gc[size]=0; } at_gc_count++; if (!silent) { fprintf(file, "collecting garbage..(%d)",at_gc_count); fflush(file); } start = clock(); mark_phase(); mark = clock(); user = mark - start; STATS(mark_time, user); sweep_phase(); sweep = clock(); user = sweep - mark; STATS(sweep_time, user); if (!silent) fprintf(file, "..\n"); }
/*
 * Polls for non-busy status.
 *
 * Returns:
 *   0 - valid status with all bits in 'mask' clear
 *   1 - invalid status seen
 *   2 - still masked but slave raised ATTN; state set to SS_RX_WAIT
 *   3 - still masked after the polling limit
 */
static int spi_tx_check_busy(u8 mask)
{
	struct spi_dev *dev = &spi_dev;
	u8 limit = 16;	/* bounded poll; do/while body runs limit+1 times */
	u8 status;

	/*
	 * Poll for valid, non-busy status.
	 */
	do {
		status = spi_platform_io(0);
		if (!(status & mask)) {
			break;
		}
		spi_delay();
	} while (limit-- > 0);
	if (status & ASPI_MSTAT_INVAL) {
		STATS(tx_stat_inval);
		return 1;
	}
	if (status & mask) {
		if (status & ASPI_MSTAT_ATTN) {
			/* Slave has data for us; switch to receive wait. */
			dev->state = SS_RX_WAIT;
			return 2;
		}
		STATS(tx_still_busy);
		return 3;
	}
	return 0;
}
/**
 * Process TWI interrupts.
 * Assumes that only valid interrupts will be enabled and that twi_mask
 * will have been set to only contain the valid bits for the current
 * I/O state. This means that we do not have to test this state at
 * interrupt time.
 */
void twi_isr_C(void)
{
	/* Read status once; twi_mask limits it to the interrupts we enabled. */
	U32 status = *AT91C_TWI_SR & twi_mask;
	if (status & AT91C_TWI_RXRDY) {
		STATS(twi_stats.bytes_rx++)
		*twi_ptr++ = *AT91C_TWI_RHR;
		twi_pending--;
		if (twi_pending == 1) {
			/* second last byte -- issue a stop on the next byte */
			*AT91C_TWI_CR = AT91C_TWI_STOP;
		}
		if (!twi_pending) {
			// All bytes have been sent. Mark operation as complete.
			STATS(twi_stats.rx_done++)
			twi_state = TWI_DONE;
			*AT91C_TWI_IDR = AT91C_TWI_RXRDY;
		}
	} else if (status & AT91C_TWI_TXRDY) {
		if (twi_pending) {
			/* Still Stuff to send */
			*AT91C_TWI_THR = *twi_ptr++;
			twi_pending--;
			STATS(twi_stats.bytes_tx++)
		} else {
			// everything has been sent, now wait for complete
			STATS(twi_stats.tx_done++);
			*AT91C_TWI_IDR = AT91C_TWI_TXRDY;
			*AT91C_TWI_IER = AT91C_TWI_TXCOMP;
			/* From here only TXCOMP/NACK interrupts are relevant. */
			twi_mask = AT91C_TWI_TXCOMP|AT91C_TWI_NACK;
		}
	}
	/* NOTE(review): this excerpt ends inside the function -- the TXCOMP
	 * completion branch and closing brace are not visible in this chunk. */
/*
 * Watchdog hook: a queued packet was not acknowledged within the
 * timeout.  Log the event and bump the interface's error counter.
 */
static void ts27010_mux_net_tx_timeout(struct net_device *net)
{
	/* Tell syslog we are hosed. */
	dev_dbg(&net->dev, "Tx timed out.\n");

	/* Account the failure in the per-device statistics. */
	STATS(net).tx_errors++;
}
/*
 * Wait for non-busy with ATTN status before receive.
 * On success hands off to spi_rx(); otherwise records the failure,
 * cycles the slave select, and returns.
 */
static void spi_rx_wait(void)
{
	struct spi_dev *dev = &spi_dev;
	int i;
	u8 status;

	/*
	 * Discard first status to let slave update its data register.
	 */
	(void)spi_platform_io(0);

	/*
	 * Check for non-busy, valid status from module
	 * If no attention from module, just return.
	 */
	i = MAX_ATTEMPTS;
	do {
		status = spi_platform_io(0);
		if (!(status & (ASPI_MSTAT_INVAL | ASPI_MSTAT_BUSY))) {
			break;
		}
		spi_delay();
	} while (i-- > 0);
	if (status & ASPI_MSTAT_INVAL) {
		/* Invalid status: cycle the select line and give up. */
		STATS(rx_stat_inval);
		spi_platform_slave_deselect();
		spi_platform_slave_select();
		return;
	}
	if (status & ASPI_MSTAT_BUSY) {
		/* Slave stayed busy past the polling limit. */
		STATS(rx_hung);
		spi_platform_slave_deselect();
		spi_platform_slave_select();
		return;
	}
	if (!(status & ASPI_MSTAT_ATTN) && !dev->rx_retry_ct) {
		/* Nothing to receive and no retry in progress: go idle. */
		STATS(rx_no_attn);
		dev->state = SS_IDLE;
		spi_platform_slave_deselect();
		return;
	}
	dev->state = SS_RX;
	spi_rx();
}
void AT_collect_minor() { struct tms start, mark, sweep; clock_t user; FILE *file = gc_f; int size; /* snapshop*/ for(size=MIN_TERM_SIZE; size<AT_getMaxTermSize(); size++) { TermInfo* ti = &terminfo[size]; ti->nb_live_blocks_before_last_gc = ti->at_nrblocks; ti->nb_reclaimed_blocks_during_last_gc=0; ti->nb_reclaimed_cells_during_last_gc=0; } at_gc_count++; if (!silent) { fprintf(file, "young collecting garbage..(%d)",at_gc_count); fflush(file); } times(&start); CHECK_UNMARKED_BLOCK(AT_BLOCK); CHECK_UNMARKED_BLOCK(AT_OLD_BLOCK); /*nb_cell_in_stack=0;*/ mark_phase_young(); /*fprintf(stderr,"AT_collect_young: nb_cell_in_stack = %d\n",nb_cell_in_stack++);*/ times(&mark); user = mark.tms_utime - start.tms_utime; STATS(mark_time, user); minor_sweep_phase_young(); CHECK_UNMARKED_BLOCK(AT_BLOCK); CHECK_UNMARKED_BLOCK(AT_OLD_BLOCK); times(&sweep); user = sweep.tms_utime - mark.tms_utime; STATS(sweep_time, user); if (!silent) fprintf(file, "..\n"); }
adlb_code xlb_handle_steal(int caller, const struct packed_steal *req, const int *work_type_counts) { TRACE_START; MPE_LOG(xlb_mpe_svr_steal_start); DEBUG("\t caller: %i", caller); adlb_code code; /* setup callback */ steal_cb_state state; state.stealer_rank = caller; state.max_size = XLB_STEAL_CHUNK_SIZE; state.work_units = malloc(sizeof(*state.work_units) * state.max_size); state.size = 0; state.stole_count = 0; xlb_workq_steal_callback cb; cb.f = handle_steal_callback; cb.data = &state; // Maximum amount of memory to return- currently unused // Call steal. This function will call back to send messages code = xlb_workq_steal(req->max_memory, work_type_counts, cb); ADLB_CHECK(code); // send any remaining. If nothing left (or nothing was stolen) // this will notify stealer we're done code = send_steal_batch(&state, true); ADLB_CHECK(code); free(state.work_units); if (state.stole_count > 0) { // Update idle check attempt if needed to account for work being // moved around. int64_t thief_idle_check_attempt = req->idle_check_attempt; if (thief_idle_check_attempt > xlb_idle_check_attempt) { DEBUG("Update idle check attempt from thief: %"PRId64, thief_idle_check_attempt); xlb_idle_check_attempt = thief_idle_check_attempt; } } DEBUG("[%i] steal result: sent %i tasks to %i", xlb_s.layout.rank, state.stole_count, caller); STATS("LOST: %i", state.stole_count); // MPE_INFO(xlb_mpe_svr_info, "LOST: %i TO: %i", state.stole_count, caller); MPE_LOG(xlb_mpe_svr_steal_end); TRACE_END; return ADLB_SUCCESS; }
void AT_collect() { struct tms start, mark, sweep; clock_t user; FILE *file = gc_f; int size; /* snapshot*/ for(size=MIN_TERM_SIZE; size<MAX_TERM_SIZE; size++) { nb_live_blocks_before_last_gc[size] = at_nrblocks[size]; nb_reclaimed_blocks_during_last_gc[size]=0; nb_reclaimed_cells_during_last_gc[size]=0; } at_gc_count++; if (!silent) { fprintf(file, "collecting garbage..(%d)",at_gc_count); fflush(file); } times(&start); CHECK_UNMARKED_BLOCK(at_blocks); CHECK_UNMARKED_BLOCK(at_old_blocks); mark_phase(); times(&mark); user = mark.tms_utime - start.tms_utime; STATS(mark_time, user); sweep_phase(); times(&sweep); user = sweep.tms_utime - mark.tms_utime; STATS(sweep_time, user); if (!silent) { fprintf(file, "..\n"); } }
void AT_collect_minor() { clock_t start, mark, sweep; clock_t user; FILE *file = gc_f; int size; /* snapshop*/ for(size=MIN_TERM_SIZE; size<AT_getMaxTermSize(); size++) { TermInfo* ti = &terminfo[size]; ti->nb_live_blocks_before_last_gc = ti->at_nrblocks; ti->nb_reclaimed_blocks_during_last_gc=0; ti->nb_reclaimed_cells_during_last_gc=0; } at_gc_count++; if (!silent) { fprintf(file, "young collecting garbage..(%d)",at_gc_count); fflush(file); } start = clock(); /* was minor_mark_phase_young(); this should be verified! */ mark_phase_young(); mark = clock(); user = mark - start; STATS(mark_time, user); minor_sweep_phase_young(); sweep = clock(); user = sweep - mark; STATS(sweep_time, user); if (!silent) fprintf(file, "..\n"); }
/*
 * Receive response from datapoint create or status request.
 * Validates that a datapoint operation is active and that the request
 * ID matches before dispatching on the datapoint state.
 */
void prop_dp_resp(struct ayla_cmd *cmd, void *buf, size_t len)
{
	struct prop_dp *dp = prop_dp_active;

	STATS(rx_resp);
	if (dp == NULL) {
		STATS(rx_not_active);
		return;
	}
	if (dp->req_id != ntohs(cmd->req_id)) {
		STATS(rx_bad_req_id);
		return;
	}
	switch (dp->state) {
	case DS_CREATE_RESP:
		prop_dp_create_resp(dp, buf, len);
		break;
	case DS_RECV:
		prop_dp_rx(dp, buf, len);
		break;
	default:
		/* Response arrived in a state that expects none: ignore. */
		break;
	}
}
//<block> -> BEGIN <var> <stats> END APTNode* BLOCK(void) { if (strcmp(currentTok.sym, "BEGINtk") == 0) { scanner(); //BUILD <VAR> NODE APTNode* varNode = VAR(); APTNode* statsNode = STATS(); if (strcmp(currentTok.sym, "ENDtk") == 0) { scanner(); //BUILD <BLOCK> NODE APTNode* blockNode = createNonIdAPTNode("<BLOCKtk>"); addChildNode(blockNode, varNode); addChildNode(blockNode, statsNode); return blockNode; } else errMsg("ENDtk"); } else errMsg("BEGINtk"); }
/*
 * Major-collection sweep over the OLD-generation blocks.
 * For each size class: clear marks on surviving terms, free unmarked
 * terms, reclaim fully-empty blocks, and accumulate reclaim statistics.
 */
void major_sweep_phase_old()
{
  int size, perc;
  int reclaiming = 0;  /* total dead cells freed this sweep */
  int alive = 0;       /* total surviving cells */

  for(size=MIN_TERM_SIZE; size<MAX_TERM_SIZE; size++) {
    Block *prev_block = NULL;
    Block *next_block;
    Block *block = at_old_blocks[size];

    while(block) {
      /* set empty = 0 to avoid recycling*/
      int empty = 1;
      int alive_in_block = 0;
      int dead_in_block = 0;
      int free_in_block = 0;
      int capacity = ((block->end)-(block->data))/size;
      header_type *cur;
      assert(block->size == size);
      /* Walk every cell slot in the block. */
      for(cur=block->data ; cur<block->end ; cur+=size) {
        /* TODO: Optimisation*/
        ATerm t = (ATerm)cur;
        if(IS_MARKED(t->header)) {
          /* Survivor: unmark it for the next collection cycle. */
          CLR_MARK(t->header);
          alive_in_block++;
          empty = 0;
          assert(IS_OLD(t->header));
        } else {
          switch(ATgetType(t)) {
          case AT_FREE:
            assert(IS_YOUNG(t->header));
            free_in_block++;
            break;
          case AT_INT:
          case AT_REAL:
          case AT_APPL:
          case AT_LIST:
          case AT_PLACEHOLDER:
          case AT_BLOB:
            /* Unmarked old term: release it and mark the slot free. */
            assert(IS_OLD(t->header));
            AT_freeTerm(size, t);
            t->header=FREE_HEADER;
            dead_in_block++;
            break;
          case AT_SYMBOL:
            assert(IS_OLD(t->header));
            AT_freeSymbol((SymEntry)t);
            t->header=FREE_HEADER;
            dead_in_block++;
            break;
          default:
            ATabort("panic in sweep phase\n");
          }
        }
      }
      assert(alive_in_block + dead_in_block + free_in_block == capacity);
      next_block = block->next_by_size;
#ifndef NDEBUG
      /* In debug builds, an "empty" block must contain only free cells. */
      if(empty) {
        for(cur=block->data; cur<block->end; cur+=size) {
          assert(ATgetType((ATerm)cur) == AT_FREE);
        }
      }
#endif
      if(empty) {
        /* DO NOT RESTORE THE FREE LIST: free cells have not been inserted*/
        /* at_freelist[size] = old_freelist;*/
        assert(top_at_blocks[size] < block->data || top_at_blocks[size] > block->end);
#ifdef GC_VERBOSE
        fprintf(stderr,"MAJOR OLD: reclaim empty block %p\n",block);
#endif
        reclaim_empty_block(at_old_blocks, size, block, prev_block);
      } else if(0 && 100*alive_in_block/capacity <= TO_YOUNG_RATIO) {
        /* Disabled branch (0 &&): demote sparse blocks to young gen. */
        promote_block_to_young(size, block, prev_block);
        old_bytes_in_young_blocks_after_last_major += (alive_in_block*SIZE_TO_BYTES(size));
      } else {
        old_bytes_in_old_blocks_after_last_major += (alive_in_block*SIZE_TO_BYTES(size));
        /* DO NOT FORGET THIS LINE: update the previous block*/
        prev_block = block;
      }
      block = next_block;
      alive += alive_in_block;
      reclaiming += dead_in_block;
    }
  }
  if(alive) {
    /* Record percentage of dead vs. surviving cells. */
    perc = (100*reclaiming)/alive;
    STATS(reclaim_perc, perc);
  }
}
/*
 * Minor-collection mark phase: conservatively scan CPU registers, the
 * C stack, and all protected term/memory regions, marking every young
 * term reachable from them.
 */
VOIDCDECL mark_phase_young()
{
  unsigned int i,j;
  unsigned long stack_size;
  ATerm *stackTop;
  ATerm *start, *stop;
  ProtEntry *prot;

#ifdef WIN32
  unsigned int r_eax, r_ebx, r_ecx, r_edx,
    r_esi, r_edi, r_esp, r_ebp;
  ATerm reg[8], *real_term;

  __asm {
    /* Get the registers into local variables to check them
       for aterms later. */
    mov r_eax, eax
    mov r_ebx, ebx
    mov r_ecx, ecx
    mov r_edx, edx
    mov r_esi, esi
    mov r_edi, edi
    mov r_esp, esp
    mov r_ebp, ebp
  }
  /* Put the register-values into an array */
  reg[0] = (ATerm) r_eax;
  reg[1] = (ATerm) r_ebx;
  reg[2] = (ATerm) r_ecx;
  reg[3] = (ATerm) r_edx;
  reg[4] = (ATerm) r_esi;
  reg[5] = (ATerm) r_edi;
  reg[6] = (ATerm) r_esp;
  reg[7] = (ATerm) r_ebp;
  for(i=0; i<8; i++) {
    /* Conservative check: does the register value point into a term? */
    real_term = AT_isInsideValidTerm(reg[i]);
    if (real_term != NULL) {
      AT_markTerm_young(real_term);
    }
    if (AT_isValidSymbol((Symbol)reg[i])) {
      AT_markSymbol_young((Symbol)reg[i]);
    }
  }
  /* The register variables are on the stack as well; set them to zero
     so they won't be processed again when the stack is traversed.
     The reg-array is also in the stack but that will be adjusted later */
  r_eax = 0;
  r_ebx = 0;
  r_ecx = 0;
  r_edx = 0;
  r_esi = 0;
  r_edi = 0;
  r_esp = 0;
  r_ebp = 0;
#else
  sigjmp_buf env;

  /* Traverse possible register variables: sigsetjmp spills the
     registers into env, which is then scanned like memory. */
  sigsetjmp(env,0);
  start = (ATerm *)((char *)env);
  stop = ((ATerm *)(((char *)env) + sizeof(sigjmp_buf)));
  mark_memory_young(start, stop);
#endif

  /* Conservatively scan the whole C stack between top and bottom. */
  stackTop = stack_top();
  start = MIN(stackTop, stackBot);
  stop = MAX(stackTop, stackBot);
  stack_size = stop-start;
  STATS(stack_depth, stack_size);
  mark_memory_young(start, stop);

  /* Traverse protected terms */
  for(i=0; i<at_prot_table_size; i++) {
    ProtEntry *cur = at_prot_table[i];
    while(cur) {
      for(j=0; j<cur->size; j++) {
        if(cur->start[j])
          AT_markTerm_young(cur->start[j]);
      }
      cur = cur->next;
    }
  }

  /* Scan explicitly protected raw-memory regions. */
  for (prot=at_prot_memory; prot != NULL; prot=prot->next) {
    mark_memory_young((ATerm *)prot->start, (ATerm *)(((char *)prot->start) + prot->size));
  }

  AT_markProtectedSymbols_young();

  /* Mark 'parked' symbol */
  if (AT_isValidSymbol(at_parked_symbol)) {
    /*fprintf(stderr,"mark_phase_young: AT_markSymbol_young(%d)\n",at_parked_symbol);*/
    AT_markSymbol_young(at_parked_symbol);
  }
}
/*
 * Send transmit command and wait until slave signals it is ready for data.
 * If it is ready, send data.
 */
static void spi_tx_start(void)
{
	struct spi_dev *dev = &spi_dev;
	u8 *bp;
	u16 limit;
	u16 rem;
	u16 len;
	u8 cmd;
	u8 status;

	/*
	 * Send command until busy status seen.
	 */
	len = dev->tx_len + 1;	/* include CRC byte in length */
	rem = ASPI_LEN(len);
	cmd = ASPI_CMD_MO + rem;
	limit = ASPI_XTRA_CMDS + 1;
	do {
		status = spi_platform_io(cmd);
		if (status & ASPI_MSTAT_BUSY) {
			break;
		}
		/*
		 * spi_delay after a few times if still not accepted.
		 */
		if (limit < ASPI_XTRA_CMDS - 2) {
			spi_delay();
		}
	} while (limit-- > 0);
	if (status & ASPI_MSTAT_INVAL) {
		STATS(tx_stat_inval);
		return;
	}
	if (!(status & ASPI_MSTAT_BUSY)) {
		/* Slave never accepted the command. */
		STATS(tx_busy);
		return;
	}

	/*
	 * Send length bytes (big-endian, CRC byte included in count).
	 */
	rem = len;
	spi_platform_io(rem >> 8);
	spi_platform_io(rem & 0xff);

	/*
	 * Send payload with hardware CRC accumulation enabled.
	 */
	spi_platform_crc_en();
	rem = len - 2;
	bp = spi_tx_buf;
	while (rem-- > 0) {
		spi_platform_io(*bp++);
	}
	spi_platform_io_crc(*bp++);	/* send last byte and CRC */
	(void)spi_platform_crc_err();	/* clear CRC status */
	dev->state = SS_TX_END;
	spi_tx_end();
}
};

/*
 * Build one rtems_blkdev_stats initializer from the eight counter
 * values, in field order.
 */
#define STATS(a, b, c, d, e, f, g, h) \
  { \
    .read_hits = a, \
    .read_misses = b, \
    .read_ahead_transfers = c, \
    .read_blocks = d, \
    .read_errors = e, \
    .write_transfers = f, \
    .write_blocks = g, \
    .write_errors = h \
  }

/* Expected cumulative device statistics after each test action. */
static const rtems_blkdev_stats expected_stats [ACTION_COUNT] = {
  STATS(0, 1, 0, 1, 0, 0, 0, 0),
  STATS(0, 2, 1, 3, 0, 0, 0, 0),
  STATS(1, 2, 2, 4, 0, 0, 0, 0),
  STATS(2, 2, 2, 4, 0, 0, 0, 0),
  STATS(2, 2, 2, 4, 0, 1, 1, 0),
  STATS(2, 3, 2, 5, 1, 1, 1, 0),
  STATS(2, 3, 2, 5, 1, 2, 2, 1)
};

/* Expected per-block media access counts after each test action.
   NOTE(review): this excerpt ends inside the initializer; the
   remaining row(s) and closing brace are not visible in this chunk. */
static const int expected_block_access_counts [ACTION_COUNT] [BLOCK_COUNT] = {
  { 1, 0, 0, 0, 0, 0 },
  { 1, 1, 1, 0, 0, 0 },
  { 1, 1, 1, 1, 0, 0 },
  { 1, 1, 1, 1, 0, 0 },
  { 1, 1, 1, 1, 1, 0 },
  { 1, 1, 1, 1, 1, 1 },
/*
 * Send receive command and wait for it to be echoed.
 * Then reads the 16-bit length, the payload, and the CRC; on CRC
 * failure retries up to SPI_CRC_RETRIES times.
 */
static void spi_rx(void)
{
	struct spi_dev *dev = &spi_dev;
	u16 rx_len;
	int i;
	int j;
	u8 byte;

	/*
	 * Send CMD_MI and wait for reply of 0xf1 start.
	 */
	for (i = 0; i < MAX_ATTEMPTS; i++) {
		byte = spi_platform_io(dev->rx_retry_ct ?
		    ASPI_CMD_MI_RETRY : ASPI_CMD_MI);
		if (byte == ASPI_CMD_MI) {
			break;
		}
		spi_delay();
	}
	if (byte != ASPI_CMD_MI) {
		STATS(rx_no_start);
		return;
	}

	/*
	 * Read until non-start byte appears.
	 * That will be the most-significant length byte
	 */
	do {
		byte = spi_platform_io(0);
	} while (byte == ASPI_CMD_MI);

	/*
	 * Read low-order byte of length.
	 */
	rx_len = (byte << 8) | spi_platform_io(0);
	if (rx_len < 2) {
		STATS(rx_too_short);
		goto flush_input;
	}

	/*
	 * If length is excessive, don't trust it.
	 */
	if (rx_len > ASPI_LEN_MAX || rx_len > sizeof(spi_rx_buf)) {
		STATS(rx_too_long);
		goto flush_input;
	}
	spi_platform_crc_en();
	rx_len -= 2;	/* handle last byte and CRC after loop */
	for (j = 0; j < rx_len; j++) {
		spi_rx_buf[j] = spi_platform_io(j & 0x7f);	/* XXX */
	}
	spi_rx_buf[j] = spi_platform_io_crc(1);
	rx_len++;
	if (spi_platform_crc_err()) {
		STATS(rx_crc);
		goto do_retry;
	}
	if (rx_len > sizeof(spi_rx_buf)) {
		rx_len = sizeof(spi_rx_buf);
	}
	STATS(rx_ok);
	dev->rx_retry_ct = 0;
	dev->state = SS_IDLE;
	spi_platform_slave_deselect();
	spi_stats.rx_len = rx_len;	/* for debugging */
	/* Hand the frame to the protocol layer. */
	if (serial_process_inc_pkt(spi_rx_buf, rx_len)) {
		STATS(rx_unk_proto);
	}
	return;

flush_input:
	/* Drain the slave's buffered output so the link resynchronizes. */
	for (i = 0; i < ASPI_LEN_MAX; i++) {
		(void)spi_platform_io(0);
	}
do_retry:
	spi_platform_slave_deselect();
	if (dev->rx_retry_ct++ < SPI_CRC_RETRIES) {
		STATS(rx_retry);
		dev->state = SS_RX_WAIT;
		spi_platform_slave_select();
	} else {
		STATS(rx_retry_limited);
		dev->rx_retry_ct = 0;
		dev->state = SS_IDLE;
	}
}
/*
 * Deliver a received mux frame to the network stack for the given DLCI.
 * Allocates an skb, copies the payload, and passes it to netif_rx();
 * updates the interface receive statistics.
 */
void ts27010_mux_rx_netchar(struct dlci_struct *dlci,
	unsigned char *in_buf, int size)
{
	FUNC_ENTER();
	struct sk_buff *skb;
	struct net_device *net = dlci->net;
	struct ts27010_mux_net *mux_net =
		(struct ts27010_mux_net *)netdev_priv(net);
#ifdef ENABLE_MUX_NET_KREF_FEATURE
	muxnet_get(mux_net);
#endif
	unsigned char *dst;
	mux_print(MSG_DEBUG, "ttyline = %d \n", dlci->line_no);
	mux_print(MSG_DEBUG, "net name : %s \n", net->name);
	/* NOTE(review): "%x" with a pointer argument -- confirm intended. */
	mux_print(MSG_MSGDUMP, "net device address: %x \n", net);
	mux_print(MSG_MSGDUMP, "net->type : %d \n", net->type);
	mux_print(MSG_DEBUG, "Data length = %d byte\n", size);

	/* Hex-dump the payload in 16-byte rows, capped at j+1 bytes. */
	int i = 0;
	int j = 15;//195;
	if (size < j)
		j = (size / 16) * 16 -1;
	for (i = 0; i <= j; i = i + 16) {
		mux_print(MSG_MSGDUMP,
			"Data %03d - %03d: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
			i, i + 15,
			*(in_buf + i + 0), *(in_buf + i + 1),
			*(in_buf + i + 2), *(in_buf + i + 3),
			*(in_buf + i + 4), *(in_buf + i + 5),
			*(in_buf + i + 6), *(in_buf + i + 7),
			*(in_buf + i + 8), *(in_buf + i + 9),
			*(in_buf + i + 10), *(in_buf + i + 11),
			*(in_buf + i + 12), *(in_buf + i +13),
			*(in_buf + i + 14), *(in_buf + i + 15));
	}

	skb = dev_alloc_skb(size + NET_IP_ALIGN);
	if (skb == NULL) {
		mux_print(MSG_ERROR, "[%s] cannot allocate skb\n", net->name);
		/* We got no receive buffer. */
		STATS(net).rx_dropped++;
	} else {
		skb->dev = net;
		skb_reserve(skb, NET_IP_ALIGN);
		dst = skb_put(skb, size);
		memcpy(dst, in_buf, size);
		/* Driver in IP mode */
		skb->protocol = ts27010net_ip_type_trans(skb, net);
		mux_print(MSG_MSGDUMP, "skb->protocol : %x \n" , skb->protocol);
		/* Deliver to network stack */
		netif_rx(skb);
	}
	/* Update our statistics.
	 * NOTE(review): rx_packets/rx_bytes are incremented even on the
	 * skb-allocation failure path above -- confirm that is intended. */
	STATS(net).rx_packets++;
	STATS(net).rx_bytes += size;
	if ( g_mux_uart_print_level != MSG_LIGHT && g_mux_uart_print_level != MSG_ERROR ) {
		mux_print(MSG_DEBUG, "All rx %d packets %d bytes\n",
			STATS(net).rx_packets, STATS(net).rx_bytes);
	} else {
		mux_print(MSG_LIGHT, "All rx %d packets %d bytes\n",
			STATS(net).rx_packets, STATS(net).rx_bytes);
	}
#ifdef ENABLE_MUX_NET_KREF_FEATURE
	muxnet_put(mux_net);
#endif
	FUNC_EXIT();
	return;
}
/*
 * Receive response for data point request.
 * Walks the TLV stream in buf, collecting the location, optional
 * binary value, offset, and EOF markers, then delivers the value via
 * the property's prop_set callback.
 */
static void prop_dp_rx(struct prop_dp *dp, void *buf, size_t len)
{
	struct prop *prop;
	struct ayla_tlv *tlv = (struct ayla_tlv *)buf;
	struct ayla_tlv *loc = NULL;	/* ATLV_LOC element, if seen */
	size_t offset = 0;		/* byte offset of this chunk */
	u8 eof = 0;			/* set when ATLV_EOF seen */
	void *valp = NULL;		/* payload of ATLV_BIN element */
	size_t val_len = 0;
	size_t rlen = len;		/* bytes remaining in buffer */
	size_t tlen;			/* current TLV payload length */

	prop = dp->prop;
	while (rlen > 0) {
		if (rlen < sizeof(*tlv)) {
			STATS(rx_len_err);
			return;
		}
		tlen = tlv->len;
		/* TLV must fit entirely inside the remaining buffer. */
		if (tlen + sizeof(*tlv) > rlen) {
			STATS(rx_tlv_len_err);
			return;
		}
		switch (tlv->type) {
		case ATLV_LOC:
			loc = tlv;
			break;
		case ATLV_BIN:
			valp = (void *)(tlv + 1);
			val_len = tlv->len;
			break;
		case ATLV_OFF:
			if (tlv->len != sizeof(u32)) {
				STATS(rx_tlv_len_err);
				return;
			}
			offset = get_ua_be32((be32 *)(tlv + 1));
			break;
		case ATLV_EOF:
			eof = 1;
			break;
		default:
			/* Unknown TLV types are counted and skipped. */
			STATS(rx_unk_tlv);
			break;
		}
		tlv = (void *)((char *)(tlv + 1) + tlen);
		rlen -= sizeof(*tlv) + tlen;
	}
	if (!loc) {
		STATS(rx_no_loc);
		return;
	}
	/* The location must match the active datapoint request. */
	if (loc->len != dp->loc_len || memcmp(loc + 1, dp->loc, dp->loc_len)) {
		STATS(rx_bad_loc);
		return;
	}
	if (!dp->prop_set) {
		STATS(rx_read_only);
		return;
	}
	if (eof) {
		/* Final chunk: mark fetched and schedule ADS notification. */
		dp->state = DS_FETCHED;
		prop->send_mask = ADS_BIT;
	}
	dp->prop_set(prop, offset, valp, val_len, eof);
}
/*
 * Major-collection sweep over the YOUNG-generation blocks.
 * Frees unmarked terms back onto the per-size freelist, reclaims empty
 * blocks, and promotes or freezes blocks dominated by old terms.
 */
void major_sweep_phase_young()
{
  int perc;
  int reclaiming = 0;  /* total dead cells freed this sweep */
  int alive = 0;       /* total surviving cells */
  int size;

  old_bytes_in_young_blocks_since_last_major = 0;

  for(size=MIN_TERM_SIZE; size<MAX_TERM_SIZE; size++) {
    Block *prev_block = NULL;
    Block *next_block;
    ATerm old_freelist;  /* freelist snapshot, restored if block leaves the list */
    Block *block = at_blocks[size];
    /* For the first (current allocation) block only sweep up to the
       allocation top; subsequent blocks are swept fully. */
    header_type *end = top_at_blocks[size];

    while(block) {
      int empty = 1;
      int alive_in_block = 0;
      int dead_in_block = 0;
      int free_in_block = 0;
      int old_in_block = 0;
      int young_in_block = 0;
      int capacity = (end-(block->data))/size;
      header_type *cur;
      assert(block->size == size);

      old_freelist = at_freelist[size];
      for(cur=block->data ; cur<end ; cur+=size) {
        ATerm t = (ATerm)cur;
        if(IS_MARKED(t->header)) {
          /* Survivor: unmark and count by generation. */
          CLR_MARK(t->header);
          alive_in_block++;
          empty = 0;
          if(IS_OLD(t->header)) {
            old_in_block++;
          } else {
            young_in_block++;
          }
        } else {
          switch(ATgetType(t)) {
          case AT_FREE:
            /* Already free: relink onto the freelist. */
            t->aterm.next = at_freelist[size];
            at_freelist[size] = t;
            free_in_block++;
            break;
          case AT_INT:
          case AT_REAL:
          case AT_APPL:
          case AT_LIST:
          case AT_PLACEHOLDER:
          case AT_BLOB:
            /* Unmarked term: release it and push the cell on the freelist. */
            AT_freeTerm(size, t);
            t->header = FREE_HEADER;
            t->aterm.next = at_freelist[size];
            at_freelist[size] = t;
            dead_in_block++;
            break;
          case AT_SYMBOL:
            AT_freeSymbol((SymEntry)t);
            t->header = FREE_HEADER;
            t->aterm.next = at_freelist[size];
            at_freelist[size] = t;
            dead_in_block++;
            break;
          default:
            ATabort("panic in sweep phase\n");
          }
        }
      }
      assert(alive_in_block + dead_in_block + free_in_block == capacity);
      next_block = block->next_by_size;
#ifndef NDEBUG
      /* In debug builds, an "empty" block must contain only free cells. */
      if(empty) {
        for(cur=block->data; cur<end; cur+=size) {
          assert(ATgetType((ATerm)cur) == AT_FREE);
        }
      }
#endif
#ifdef GC_VERBOSE
      /*fprintf(stderr,"old_cell_in_young_block ratio = %d\n",100*old_in_block/capacity);*/
#endif
      /* Only fully-swept blocks (end==block->end) may leave the list. */
      if(end==block->end && empty) {
#ifdef GC_VERBOSE
        fprintf(stderr,"MAJOR YOUNG: reclaim empty block %p\n",block);
#endif
        /* Drop this block's cells from the freelist before reclaiming. */
        at_freelist[size] = old_freelist;
        reclaim_empty_block(at_blocks, size, block, prev_block);
      } else if(end==block->end && 100*old_in_block/capacity >= TO_OLD_RATIO) {
        if(young_in_block == 0) {
#ifdef GC_VERBOSE
          fprintf(stderr,"MAJOR YOUNG: promote block %p to old\n",block);
#endif
          at_freelist[size] = old_freelist;
          promote_block_to_old(size, block, prev_block);
          old_bytes_in_old_blocks_after_last_major += (old_in_block*SIZE_TO_BYTES(size));
        } else {
          /* Mixed old/young block: freeze it so its cells are not reused. */
#ifdef GC_VERBOSE
          fprintf(stderr,"MAJOR YOUNG: freeze block %p\n",block);
#endif
          SET_FROZEN(block);
          old_bytes_in_young_blocks_after_last_major += (old_in_block*SIZE_TO_BYTES(size));
          at_freelist[size] = old_freelist;
          prev_block = block;
        }
      } else {
        old_bytes_in_young_blocks_after_last_major += (old_in_block*SIZE_TO_BYTES(size));
        prev_block = block;
      }

      block = next_block;
      if(block) {
        end = block->end;
      }
      alive += alive_in_block;
      reclaiming += dead_in_block;
    }
#ifndef NDEBUG
    /* Debug check: every freelist entry really is a free cell. */
    if(at_freelist[size]) {
      ATerm data;
      for(data = at_freelist[size] ; data ; data=data->aterm.next) {
        assert(EQUAL_HEADER(data->header,FREE_HEADER));
        assert(ATgetType(data) == AT_FREE);
      }
    }
#endif
  }
  if(alive) {
    /* Record percentage of dead vs. surviving cells. */
    perc = (100*reclaiming)/alive;
    STATS(reclaim_perc, perc);
  }
}
/*
 * Minor-collection sweep over the YOUNG-generation blocks.
 * Terms survive if marked OR already old; everything else is freed
 * onto the rebuilt per-size freelist.  Block reclamation/promotion is
 * currently disabled here (the 'if(0 && ...)' branches).
 */
void minor_sweep_phase_young()
{
  int size, perc;
  int reclaiming = 0;  /* total dead cells freed this sweep */
  int alive = 0;       /* total surviving cells */

  old_bytes_in_young_blocks_since_last_major = 0;

  for(size=MIN_TERM_SIZE; size<MAX_TERM_SIZE; size++) {
    Block *prev_block = NULL;
    Block *next_block;
    ATerm old_freelist;  /* freelist snapshot, restored for frozen blocks */
    Block *block = at_blocks[size];
    /* Sweep the current allocation block only up to the allocation top. */
    header_type *end = top_at_blocks[size];

    /* empty the freelist*/
    at_freelist[size] = NULL;

    while(block) {
      /* set empty = 0 to avoid recycling*/
      int empty = 1;
      int alive_in_block = 0;
      int dead_in_block = 0;
      int free_in_block = 0;
      int old_in_block = 0;
      int capacity = (end-(block->data))/size;
      header_type *cur;
      assert(block->size == size);

      old_freelist = at_freelist[size];
      for(cur=block->data ; cur<end ; cur+=size) {
        ATerm t = (ATerm)cur;
        /* A cell survives a minor GC if marked or already old. */
        if(IS_MARKED(t->header) || IS_OLD(t->header)) {
          if(IS_OLD(t->header)) {
            old_in_block++;
          }
          CLR_MARK(t->header);
          alive_in_block++;
          empty = 0;
          assert(!IS_MARKED(t->header));
        } else {
          switch(ATgetType(t)) {
          case AT_FREE:
            /* AT_freelist[size] is not empty: so DO NOT ADD t*/
            t->aterm.next = at_freelist[size];
            at_freelist[size] = t;
            free_in_block++;
            break;
          case AT_INT:
          case AT_REAL:
          case AT_APPL:
          case AT_LIST:
          case AT_PLACEHOLDER:
          case AT_BLOB:
            /* Unmarked young term: release and push on the freelist. */
            AT_freeTerm(size, t);
            t->header = FREE_HEADER;
            t->aterm.next = at_freelist[size];
            at_freelist[size] = t;
            dead_in_block++;
            break;
          case AT_SYMBOL:
            AT_freeSymbol((SymEntry)t);
            t->header = FREE_HEADER;
            t->aterm.next = at_freelist[size];
            at_freelist[size] = t;
            dead_in_block++;
            break;
          default:
            ATabort("panic in sweep phase\n");
          }
          assert(!IS_MARKED(t->header));
        }
      }
      assert(alive_in_block + dead_in_block + free_in_block == capacity);
      next_block = block->next_by_size;
#ifndef NDEBUG
      /* In debug builds, an "empty" block must contain only free cells. */
      if(empty) {
        for(cur=block->data; cur<end; cur+=size) {
          assert(ATgetType((ATerm)cur) == AT_FREE);
        }
      }
#endif
      /* Do not reclaim frozen blocks: roll their cells off the freelist. */
      if(IS_FROZEN(block)) {
        at_freelist[size] = old_freelist;
      }
      /* TODO: create freeList Old*/
      if(0 && empty) {
        /* Disabled: reclaim fully-empty blocks during minor GC. */
        at_freelist[size] = old_freelist;
        reclaim_empty_block(at_blocks, size, block, prev_block);
      } else if(0 && 100*old_in_block/capacity >= TO_OLD_RATIO) {
        /* Disabled: promote old-dominated blocks during minor GC. */
        promote_block_to_old(size, block, prev_block);
      } else {
        old_bytes_in_young_blocks_since_last_major += (old_in_block*SIZE_TO_BYTES(size));
        prev_block = block;
      }

      block = next_block;
      if(block) {
        end = block->end;
      }
      alive += alive_in_block;
      reclaiming += dead_in_block;
    }
#ifndef NDEBUG
    /* Debug check: every freelist entry really is a free cell. */
    if(at_freelist[size]) {
      ATerm data;
      /*fprintf(stderr,"minor_sweep_phase_young: ensure empty freelist[%d]\n",size);*/
      for(data = at_freelist[size] ; data ; data=data->aterm.next) {
        if(!EQUAL_HEADER(data->header,FREE_HEADER)) {
          fprintf(stderr,"data = %p header = %x\n",data,(unsigned int) data->header);
        }
        assert(EQUAL_HEADER(data->header,FREE_HEADER));
        assert(ATgetType(data) == AT_FREE);
      }
    }
#endif
  }
  if(alive) {
    /* Record percentage of dead vs. surviving cells. */
    perc = (100*reclaiming)/alive;
    STATS(reclaim_perc, perc);
  }
}
/*
 * Workqueue handler: drain the per-device transmit queue.
 * Copies each queued skb into a flat buffer, writes it to the mux
 * UART line, and updates tx statistics; on write failure the work is
 * re-queued with a 200 ms delay and the skb stays queued for retry.
 */
static void ts27010_mux_net_tx_work(struct work_struct * work)
{
	struct sk_buff *skb = NULL;
	struct sk_buff *temp = NULL;
	struct net_device *net = NULL;
	struct ts27010_mux_net *mux_net = NULL;
	struct dlci_struct *dlci = NULL;
	unsigned char *data = NULL;
	int len = 0;
	int ret = 0;
	int err = 0;

	FUNC_ENTER();

	if ( work == NULL ) {
		mux_print(MSG_ERROR, "[WQ] work == NULL\n");
		goto net_tx_work_end2;
	}
	mux_net = container_of(work, struct ts27010_mux_net, net_work.work);
	if ( mux_net == NULL ) {
		mux_print(MSG_ERROR, "[WQ] mux_net == NULL\n");
		goto net_tx_work_end2;
	}
	/* Serialize against other queue/dequeue work on this device. */
	mutex_lock(&mux_net->net_wq_lock);
	dlci = mux_net->dlci;
	if ( dlci == NULL ) {
		mux_print(MSG_ERROR, "[WQ] dlci == NULL\n");
		goto net_tx_work_end;
	}
	net = mux_net->net;
	if ( net == NULL ) {
		mux_print(MSG_ERROR, "[WQ] net == NULL\n");
		goto net_tx_work_end;
	}
	if ( skb_queue_len(&mux_net->txhead) == 0 ) {
		mux_print(MSG_WARNING, "[WQ2] skb_queue_len=[0]\n");
		goto net_tx_work_end;
	}
	do {
		/* Peek (not dequeue) so a failed write keeps the skb queued. */
		skb = skb_peek(&mux_net->txhead);
		if ( skb == NULL ) {
			mux_print(MSG_ERROR, "[WQ] skb == NULL\n");
			goto net_tx_work_end;
		}
		data = kzalloc(skb->len, GFP_ATOMIC);
		if (!data) {
			mux_print(MSG_ERROR, "[WQ] buffer kzalloc() failed\n");
			goto net_tx_work_end;
		}
		err = skb_copy_bits(skb, 0, data, skb->len);
		if (err < 0) {
			mux_print(MSG_ERROR, "[WQ] skb_copy_bits() failed - %d\n", err);
			if ( data != NULL ) {
				kfree(data);
				/* NOTE(review): "%x" with a pointer arg -- confirm. */
				mux_print(MSG_ERROR, "[WQ] free memory addr=[0x%x]\n", data);
				data = NULL;
			}
			goto net_tx_work_end;
		}
		len = skb->len;
		mux_print(MSG_LIGHT, "[WQ] dlci=[%d], data=[%d] \n", dlci->line_no, len);
		if ( data != NULL && len > 0) {
			mux_print(MSG_LIGHT, "[WQ] ts27010_mux_uart_line_write+\n");
			ret = ts27010_mux_uart_line_write(dlci->line_no, data, len);
			mux_print(MSG_LIGHT, "[WQ] ts27010_mux_uart_line_write-, ret=[%d]\n", ret);
		}
		if (ret < 0 ) {
			mux_print(MSG_ERROR, "[WQ] ts27010_mux_uart_line_write, ret=[%d]\n", ret);
			/*try again*/
			if ( data != NULL ) {
				kfree(data);
				mux_print(MSG_ERROR, "[WQ] free memory addr=[0x%x]\n", data);
				data = NULL;
			}
			mux_net->retry_count ++;
			/* Re-queue ourselves; the skb is still at the queue head. */
			queue_delayed_work(mux_net->net_wq, &mux_net->net_work, msecs_to_jiffies(200));
			goto net_tx_work_end;
		} else {
			mux_net->retry_count = 0;
		}
		/* Successful write: account it in the device statistics. */
		STATS(net).tx_packets++;
		STATS(net).tx_bytes += skb->len;
		if ( g_mux_uart_print_level != MSG_LIGHT && g_mux_uart_print_level != MSG_ERROR ) {
			mux_print(MSG_DEBUG, "[WQ] All tx %d packets %d bytes\n",
				STATS(net).tx_packets, STATS(net).tx_bytes);
		} else {
			mux_print(MSG_LIGHT, "[WQ] All tx %d packets %d bytes\n",
				STATS(net).tx_packets, STATS(net).tx_bytes);
		}
		if ( data != NULL ) {
			kfree(data);
			data = NULL;
		}
		temp = skb_dequeue(&mux_net->txhead);
		/* And tell the kernel when the last transmit started. */
		net->trans_start = jiffies;
		consume_skb(skb);
#ifdef ENABLE_MUX_NET_KREF_FEATURE
		muxnet_put(mux_net);
#endif
		/* NOTE(review): the lock is dropped and immediately re-taken
		 * here, presumably to let the enqueue path run between
		 * packets -- confirm this is the intent. */
		mutex_unlock(&mux_net->net_wq_lock);
		mutex_lock(&mux_net->net_wq_lock);
	} while (skb_queue_len(&mux_net->txhead) > 0);

net_tx_work_end:
	if ( mux_net != NULL )
		mutex_unlock(&mux_net->net_wq_lock);
net_tx_work_end2:
	mux_print(MSG_LIGHT, "[WQ] net_tx_work_end\n");
	FUNC_EXIT();
	return;
}