/*
 * Forward a switch-fabric packet up to the CPU (TCP/IP) task.
 *
 * Copies the packet's standard byte stream into a freshly allocated
 * OS_FRAME, queues it for the network stack, and wakes the TCP task.
 *
 * Returns 1 on success, -1 on error (CPU-originated packet, runt/empty
 * frame, or out of memory).
 */
int SwitchPortCPU::outputPacket(PriPacket& pkg)
{
    /* Never loop a CPU-originated packet back to the CPU port. */
    if (pkg.getSourcePort() == CPU_Port_Sn) {
        return -1;
    }

    uint16 len = 0;
    uint8* stream = pkg.getStdStream(&len);

    /* Reject runt or empty frames. */
    if (len < 30 || stream == 0) {
        std::cout << "SwitchPortCPU send error: len " << (int)len << std::endl;
        return -1;
    }

#ifdef EZ_DEBUG
    trace->sendOnePkg();
#endif

    /* Flag 0x80000000 tells alloc_mem() to skip the sys_error() call
     * when out of memory; allocation is serialized with the task lock. */
    tsk_lock();
    OS_FRAME* frame = alloc_mem (len | 0x80000000);
    tsk_unlock();

    if (frame == NULL) {
        std::cout << "SwitchPortCPU::outputPacket() alloc_mem(" << (int)len
                  << ") error" << std::endl;
        return -1;
    }

    memcpy(&frame->data[0], stream, len);
    put_in_queue(frame);
    /* Signal the TCP task that a frame is waiting. */
    os_evt_set(0x0001, t_tcpTask);
    return 1;
}
/*----------------------------------------------------------------------------
 *      Ethernet Device callback: a frame of 'size' bytes was received.
 *      Allocates an OS_FRAME, queues it, and returns a pointer to the
 *      frame's data area for the driver to copy into.  Returns NULL when
 *      no memory is available (the packet is then dropped).
 *---------------------------------------------------------------------------*/
void *RxFrame (int size)
{
  /* Flag 0x80000000 suppresses the sys_error() call on out-of-memory. */
  OS_FRAME *frame = alloc_mem (size | 0x80000000);

  if (frame == NULL) {
    return (NULL);
  }
  put_in_queue (frame);
  return (&frame->data[0]);
}
/* Handle a FIRSTKEY request: hand it to the db thread via the queue.
 * Replies ERR_MEM if the request could not be queued; otherwise the db
 * thread is responsible for the reply. */
static void parse_firstkey(struct req_info *req)
{
	stats.db_firstkey++;

	if (!put_in_queue(req, REQ_FIRSTKEY, 1, NULL, 0, NULL, 0))
		req->reply_err(req, ERR_MEM);
}
static void parse_del(struct req_info *req) { int hit, cache_only, sync, rv; const unsigned char *key; uint32_t ksize; ksize = * (uint32_t *) req->payload; ksize = ntohl(ksize); if (req->psize < ksize) { stats.net_broken_req++; req->reply_err(req, ERR_BROKEN); return; } if (settings.read_only) { req->reply_err(req, ERR_RO); return; } FILL_CACHE_FLAG(del); FILL_SYNC_FLAG(); key = req->payload + sizeof(uint32_t); hit = cache_del(cache_table, key, ksize); if (cache_only && hit) { req->reply_mini(req, REP_OK); } else if (cache_only && !hit) { req->reply_mini(req, REP_NOTIN); } else if (!cache_only) { rv = put_in_queue(req, REQ_DEL, sync, key, ksize, NULL, 0); if (!rv) { req->reply_err(req, ERR_MEM); return; } if (!sync) { req->reply_mini(req, REP_OK); } return; } return; }
static void parse_get(const struct req_info *req) { int hit, cache_only, rv; const unsigned char *key; uint32_t ksize; unsigned char *val = NULL; size_t vsize = 0; ksize = * (uint32_t *) req->payload; ksize = ntohl(ksize); if (req->psize < ksize) { stats.net_broken_req++; req->reply_err(req, ERR_BROKEN); return; } FILL_CACHE_FLAG(get); key = req->payload + sizeof(uint32_t); hit = cache_get(cache_table, key, ksize, &val, &vsize); if (cache_only && !hit) { stats.cache_misses++; req->reply_mini(req, REP_CACHE_MISS); return; } else if (!cache_only && !hit) { rv = put_in_queue(req, REQ_GET, 1, key, ksize, NULL, 0); if (!rv) { req->reply_err(req, ERR_MEM); return; } return; } else { stats.cache_hits++; req->reply_long(req, REP_CACHE_HIT, val, vsize); return; } }
static void parse_nextkey(struct req_info *req) { int rv; const unsigned char *key; uint32_t ksize; ksize = * (uint32_t *) req->payload; ksize = ntohl(ksize); if (req->psize < ksize) { stats.net_broken_req++; req->reply_err(req, ERR_BROKEN); return; } stats.db_nextkey++; key = req->payload + sizeof(uint32_t); rv = put_in_queue(req, REQ_NEXTKEY, 1, key, ksize, NULL, 0); if (!rv) { req->reply_err(req, ERR_MEM); return; } }
static void parse_incr(struct req_info *req) { int cres, cache_only, rv; const unsigned char *key; uint32_t ksize; int64_t increment, newval; const int max = 65536; /* Request format: * 4 ksize * ksize key * 8 increment (big endian int64_t) */ ksize = * (uint32_t *) req->payload; ksize = ntohl(ksize); /* Sanity check on sizes: * - ksize + 8 must be < req->psize * - ksize + 8 must be < 2^16 = 64k */ if ( (req->psize < ksize + 8) || ((ksize + 8) > max)) { stats.net_broken_req++; req->reply_err(req, ERR_BROKEN); return; } if (settings.read_only) { req->reply_err(req, ERR_RO); return; } FILL_CACHE_FLAG(incr); key = req->payload + sizeof(uint32_t); increment = ntohll( * (int64_t *) (key + ksize) ); cres = cache_incr(cache_table, key, ksize, increment, &newval); if (cres == -3) { req->reply_err(req, ERR_MEM); return; } else if (cres == -2) { /* the value was not NULL terminated */ req->reply_mini(req, REP_NOMATCH); return; } if (!cache_only) { /* at this point, the cache_incr() was either successful or a * miss, but we don't really care */ rv = put_in_queue(req, REQ_INCR, 1, key, ksize, (unsigned char *) &increment, sizeof(increment)); if (!rv) { req->reply_err(req, ERR_MEM); return; } } else { if (cres == -1) { req->reply_mini(req, REP_NOTIN); } else { newval = htonll(newval); req->reply_long(req, REP_OK, (unsigned char *) &newval, sizeof(newval)); } } return; }
static void parse_set(struct req_info *req) { int rv, cache_only, sync; const unsigned char *key, *val; uint32_t ksize, vsize; const int max = 65536; /* Request format: * 4 ksize * 4 vsize * ksize key * vsize val */ ksize = * (uint32_t *) req->payload; ksize = ntohl(ksize); vsize = * ( ((uint32_t *) req->payload) + 1), vsize = ntohl(vsize); /* Sanity check on sizes: * - ksize and vsize must both be < req->psize * - ksize and vsize must both be < 2^16 = 64k * - ksize + vsize < 2^16 = 64k */ if ( (req->psize < ksize) || (req->psize < vsize) || (ksize > max) || (vsize > max) || ( (ksize + vsize) > max) ) { stats.net_broken_req++; req->reply_err(req, ERR_BROKEN); return; } if (settings.read_only) { req->reply_err(req, ERR_RO); return; } FILL_CACHE_FLAG(set); FILL_SYNC_FLAG(); key = req->payload + sizeof(uint32_t) * 2; val = key + ksize; rv = cache_set(cache_table, key, ksize, val, vsize); if (rv != 0) { req->reply_err(req, ERR_MEM); return; } if (!cache_only) { rv = put_in_queue(req, REQ_SET, sync, key, ksize, val, vsize); if (!rv) { req->reply_err(req, ERR_MEM); return; } if (!sync) { req->reply_mini(req, REP_OK); } return; } else { req->reply_mini(req, REP_OK); } return; }
static void fetch_packet (void) { /* Fetch a packet from DMA buffer and release buffer. */ OS_FRAME *frame; U32 j,ei,si,RxLen; U32 *sp,*dp; for (ei = RxBufIndex; ; RxBufIndex = ei) { /* Scan the receive buffers. */ for (si = ei; ; ) { if (!(Rx_Desc[ei].addr & AT91C_OWNERSHIP_BIT)) { /* End of scan, unused buffers found. */ if (si != ei) { /* Found erroneus fragment, release it. */ ei = si; goto rel; } return; } /* Search for EOF. */ if (Rx_Desc[ei].stat & RD_EOF) { break; } /* Check the SOF-SOF sequence */ if (Rx_Desc[ei].stat & RD_SOF) { /* Found one, this is new start of frame. */ si = ei; } if (++ei == NUM_RX_BUF) ei = 0; if (ei == RxBufIndex) { /* Safety limit to prevent deadlock. */ ei = si; goto rel; } } /* Get frame length. */ RxLen = Rx_Desc[ei].stat & RD_LENGTH_MASK; if (++ei == NUM_RX_BUF) ei = 0; if (RxLen > ETH_MTU) { /* Packet too big, ignore it and free buffer. */ goto rel; } /* Flag 0x80000000 to skip sys_error() call when out of memory. */ frame = alloc_mem (RxLen | 0x80000000); /* if 'alloc_mem()' has failed, ignore this packet. */ if (frame != NULL) { /* Make sure that block is 4-byte aligned */ dp = (U32 *)&frame->data[0]; for ( ; si != ei; RxLen -= ETH_RX_BUF_SIZE) { sp = (U32 *)(Rx_Desc[si].addr & ~0x3); j = RxLen; if (j > ETH_RX_BUF_SIZE) j = ETH_RX_BUF_SIZE; for (j = (j + 3) >> 2; j; j--) { *dp++ = *sp++; } if (++si == NUM_RX_BUF) si = 0; } put_in_queue (frame); } /* Release packet fragments from EMAC IO buffer. */ rel:for (j = RxBufIndex; ; ) { Rx_Desc[j].addr &= ~AT91C_OWNERSHIP_BIT; if (++j == NUM_RX_BUF) j = 0; if (j == ei) break; } }