static int handle_gen_queued(struct task_gen_server *task)
{
    uint8_t out[MAX_PKT_BURST];
    struct bundle_ctx *conn;
    struct pkt_tuple pkt_tuple;
    struct l4_meta l4_meta;
    uint16_t j;
    uint16_t cancelled = 0;
    int ret;

    if (task->cur_mbufs_beg == task->cur_mbufs_end) {
        task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST);
        task->cur_mbufs_beg = 0;
    }
    uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg;
    struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg;

    j = task->cancelled;
    if (task->cancelled) {
        uint16_t pkt_len = mbuf_wire_size(mbufs[0]);

        if (token_time_take(&task->token_time, pkt_len) != 0)
            return -1;

        out[0] = task->out_saved;
        task->cancelled = 0;
    }

    /* Main proc loop */
    for (; j < n_pkts; ++j) {
        if (parse_pkt(mbufs[j], &pkt_tuple, &l4_meta)) {
            plogdx_err(mbufs[j], "Unknown packet, parsing failed\n");
            out[j] = OUT_DISCARD;
        }

        conn = NULL;
        ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple);

        if (ret >= 0)
            conn = task->bundle_ctx_pool.hash_entries[ret];
        else {
            /* If not part of existing connection, try to create a connection */
            struct new_tuple nt;
            nt.dst_addr = pkt_tuple.dst_addr;
            nt.proto_id = pkt_tuple.proto_id;
            nt.dst_port = pkt_tuple.dst_port;
            rte_memcpy(nt.l2_types, pkt_tuple.l2_types, sizeof(nt.l2_types));
            const struct bundle_cfg *n;

            if (NULL != (n = server_accept(task, &nt))) {
                conn = bundle_ctx_pool_get(&task->bundle_ctx_pool);
                if (!conn) {
                    out[j] = OUT_DISCARD;
                    plogx_err("No more free bundles to accept new connection\n");
                    continue;
                }
                ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple);

                if (ret < 0) {
                    out[j] = OUT_DISCARD;
                    bundle_ctx_pool_put(&task->bundle_ctx_pool, conn);
                    plog_err("Adding key failed while trying to accept connection\n");
                    continue;
                }

                task->bundle_ctx_pool.hash_entries[ret] = conn;

                bundle_init_w_cfg(conn, n, task->heap, PEER_SERVER, &task->seed);
                conn->tuple = pkt_tuple;

                if (conn->ctx.stream_cfg->proto == IPPROTO_TCP)
                    task->l4_stats.tcp_created++;
                else
                    task->l4_stats.udp_created++;
            }
            else {
                plog_err("Packet received for service that does not exist :\n"
                         "source ip = %0x:%u\n"
                         "dst ip = %0x:%u\n",
                         pkt_tuple.src_addr, rte_bswap16(pkt_tuple.src_port),
                         pkt_tuple.dst_addr, rte_bswap16(pkt_tuple.dst_port));
            }
        }

        /* bundle contains either an active connection or a newly created
           connection. If it is NULL, then not listening. */
        if (NULL != conn) {
            ret = bundle_proc_data(conn, mbufs[j], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

            out[j] = ret == 0 ? 0 : OUT_HANDLED;
            if (ret == 0) {
                uint16_t pkt_len = mbuf_wire_size(mbufs[j]);

                if (token_time_take(&task->token_time, pkt_len) != 0) {
                    task->out_saved = out[j];
                    task->cancelled = 1;
                    task->base.tx_pkt(&task->base, mbufs, j, out);
                    task->cur_mbufs_beg += j;
                    return -1;
                }
            }
        }
        else {
            pkt_tuple_debug(&pkt_tuple);
            plogd_dbg(mbufs[j], NULL);
            out[j] = OUT_DISCARD;
        }
    }

    task->base.tx_pkt(&task->base, mbufs, j, out);
    task->cur_mbufs_beg += j;
    return 0;
}
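/*
 * Illustration only (not part of PROX): the handler above suspends a burst when
 * token_time_take() fails and resumes it on the next call via task->cancelled,
 * task->out_saved and task->cur_mbufs_beg. The minimal sketch below shows the same
 * "pause and resume a rate-limited burst" pattern with a hypothetical byte-based
 * token bucket; the names (token_bucket, tb_take, burst_state, send_burst) are
 * made up for the example.
 */
#include <stdint.h>

struct token_bucket {
    uint64_t tokens;    /* bytes currently allowed      */
    uint64_t rate;      /* bytes accrued per cycle      */
    uint64_t last_tsc;  /* last time tokens were added  */
};

static int tb_take(struct token_bucket *tb, uint64_t now, uint32_t cost)
{
    tb->tokens += (now - tb->last_tsc) * tb->rate;   /* accrue since last refill */
    tb->last_tsc = now;
    if (tb->tokens < cost)
        return -1;                                   /* caller must retry later  */
    tb->tokens -= cost;
    return 0;
}

struct burst_state { uint16_t next; };               /* index of first unsent pkt */

/* Returns 0 when the whole burst was sent, -1 when it must be resumed later. */
static int send_burst(struct burst_state *st, struct token_bucket *tb,
                      const uint32_t *pkt_len, uint16_t n_pkts, uint64_t now)
{
    for (uint16_t i = st->next; i < n_pkts; ++i) {
        if (tb_take(tb, now, pkt_len[i]) != 0) {
            st->next = i;                            /* remember where we stopped */
            return -1;
        }
        /* transmit packet i here */
    }
    st->next = 0;
    return 0;
}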
static int handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
    struct task_gen_client *task = (struct task_gen_client *)tbase;
    uint8_t out[MAX_PKT_BURST] = {0};
    struct bundle_ctx *conn;
    int ret;

    if (n_pkts) {
        for (int i = 0; i < n_pkts; ++i) {
            struct pkt_tuple pt;
            struct l4_meta l4_meta;

            if (parse_pkt(mbufs[i], &pt, &l4_meta)) {
                plogdx_err(mbufs[i], "Parsing failed\n");
                out[i] = OUT_DISCARD;
                continue;
            }

            ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt);
            if (ret < 0) {
                plogx_dbg("Client: packet RX that does not belong to connection:"
                          "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n",
                          IPv4_BYTES(((uint8_t*)&pt.dst_addr)), rte_bswap16(pt.dst_port),
                          IPv4_BYTES(((uint8_t*)&pt.src_addr)), rte_bswap16(pt.src_port));
                plogdx_dbg(mbufs[i], NULL);

                if (pt.proto_id == IPPROTO_TCP) {
                    stream_tcp_create_rst(mbufs[i], &l4_meta, &pt);
                    out[i] = 0;
                    continue;
                }
                else {
                    out[i] = OUT_DISCARD;
                    continue;
                }
            }

            conn = task->bundle_ctx_pool.hash_entries[ret];
            ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
            out[i] = ret == 0 ? 0 : OUT_HANDLED;
        }
        task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
    }

    /* If there is at least one callback to handle, handle at most MAX_PKT_BURST */
    if (heap_top_is_lower(task->heap, rte_rdtsc())) {
        if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs))
            return 0;

        uint16_t n_called_back = 0;
        while (heap_top_is_lower(task->heap, rte_rdtsc()) && n_called_back < MAX_PKT_BURST) {
            conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

            /* handle packet TX (retransmit or delayed transmit) */
            ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

            if (ret == 0) {
                out[n_called_back] = 0;
                n_called_back++;
            }
        }
        plogx_dbg("During callback, will send %d packets\n", n_called_back);

        task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
        task->n_new_mbufs -= n_called_back;
    }

    uint32_t n_new = task->bundle_ctx_pool.n_free_bundles;
    n_new = n_new > MAX_PKT_BURST ? MAX_PKT_BURST : n_new;

    uint64_t diff = (rte_rdtsc() - task->new_conn_last_tsc) / task->new_conn_cost;
    task->new_conn_last_tsc += diff * task->new_conn_cost;
    task->new_conn_tokens += diff;

    if (task->new_conn_tokens > 16)
        task->new_conn_tokens = 16;
    if (n_new > task->new_conn_tokens)
        n_new = task->new_conn_tokens;
    task->new_conn_tokens -= n_new;
    if (n_new == 0)
        return 0;

    if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs))
        return 0;

    for (uint32_t i = 0; i < n_new; ++i) {
        struct bundle_ctx *bundle_ctx = bundle_ctx_pool_get_w_cfg(&task->bundle_ctx_pool);

        PROX_ASSERT(bundle_ctx);

        struct pkt_tuple *pt = &bundle_ctx->tuple;
        int n_retries = 0;
        do {
            /* Note that the actual packet sent will contain
               swapped addresses and ports (i.e. pkt.src <=>
               tuple.dst). The incoming packet will match this
               struct. */
            bundle_init(bundle_ctx, task->heap, PEER_CLIENT, &task->seed);

            ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt);
            if (ret >= 0) {
                if (n_retries++ == 1000) {
                    plogx_err("Already tried 1K times\n");
                }
            }
        } while (ret >= 0);

        ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt);

        if (ret < 0) {
            plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles);
            bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx);

            pkt_tuple_debug2(pt);
            out[i] = OUT_DISCARD;
            continue;
        }

        task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx;

        if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP)
            task->l4_stats.tcp_created++;
        else
            task->l4_stats.udp_created++;
        task->l4_stats.bundles_created++;

        ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
        out[i] = ret == 0 ? 0 : OUT_HANDLED;
    }

    int ret2 = task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out);
    task->n_new_mbufs -= n_new;
    return ret2;
}
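/*
 * Illustration only: the connection-setup throttle above converts elapsed TSC
 * cycles into "new connection" tokens (one token every new_conn_cost cycles,
 * capped at 16). A self-contained sketch of that accounting, with hypothetical
 * names (conn_limiter, conn_tokens_grab), might look like this.
 */
#include <stdint.h>

struct conn_limiter {
    uint64_t cost;      /* cycles per new connection           */
    uint64_t last_tsc;  /* last time tokens were accrued        */
    uint64_t tokens;    /* currently available tokens, capped   */
};

/* Accrue tokens since last call, then grab at most `wanted`; returns the grant. */
static uint32_t conn_tokens_grab(struct conn_limiter *lim, uint64_t now, uint32_t wanted)
{
    uint64_t diff = (now - lim->last_tsc) / lim->cost;

    lim->last_tsc += diff * lim->cost;  /* keep the remainder for next time */
    lim->tokens += diff;
    if (lim->tokens > 16)
        lim->tokens = 16;               /* bound burstiness */

    uint32_t grant = wanted < lim->tokens ? wanted : (uint32_t)lim->tokens;
    lim->tokens -= grant;
    return grant;
}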
int pfring_dag_recv(pfring *ring, u_char **buffer, u_int buffer_len, struct pfring_pkthdr *hdr, u_int8_t wait_for_incoming_packet)
{
    int caplen = 0;
    int skip;
    dag_record_t *erf_hdr;
    uint16_t rlen;
    u_char *payload;
    uint8_t *ext_hdr_type;
    uint32_t ext_hdr_num;
    uint32_t len;
    unsigned long long ts;
    int retval = 0;

    if (ring->priv_data == NULL)
        return -1;

    pfring_dag *d = (pfring_dag *) ring->priv_data;

    if (ring->reentrant)
        pthread_spin_lock(&ring->spinlock);

check_and_poll:

    if (ring->break_recv_loop)
        goto exit; /* retval = 0 */

    if ((d->top - d->bottom) < dag_record_size) {
        if ((d->top = dag_advance_stream(d->fd, d->stream_num, (void * /* but it is void** */) &d->bottom)) == NULL) {
            retval = -1;
            goto exit;
        }

        if ((d->top - d->bottom) < dag_record_size && !wait_for_incoming_packet)
            goto exit; /* retval = 0 */

        goto check_and_poll;
    }

    erf_hdr = (dag_record_t *) d->bottom;
    rlen = ntohs(erf_hdr->rlen);

    if (rlen < dag_record_size) {
        fprintf(stderr, "Error: wrong record size\n");
        retval = -1;
        goto exit;
    }

    d->bottom += rlen;

    skip = 0;
    switch ((erf_hdr->type & 0x7f)) {
    case TYPE_PAD:
        skip = 1;
        /* fall through: PAD records still carry the loss counter */
    case TYPE_ETH:
        /* stats update */
        if (erf_hdr->lctr) {
            if (d->stats_drop > (UINT_MAX - ntohs(erf_hdr->lctr)))
                d->stats_drop = UINT_MAX;
            else
                d->stats_drop += ntohs(erf_hdr->lctr);
        }
        break;
    default:
        break;
    }

    if (skip)
        goto check_and_poll;

    payload = (u_char *) erf_hdr;
    payload += dag_record_size;

    /* computing extension headers size */
    ext_hdr_type = &erf_hdr->type;
    ext_hdr_num = 0;
    while ((*ext_hdr_type & 0x80) && (rlen > (16 + ext_hdr_num * 8))) {
        ext_hdr_type += 8;
        ext_hdr_num++;
    }
    payload += 8 * ext_hdr_num;

    switch ((erf_hdr->type & 0x7f)) {
    case TYPE_COLOR_HASH_ETH:
    case TYPE_DSM_COLOR_ETH:
    case TYPE_COLOR_ETH:
    case TYPE_ETH:
        len = ntohs(erf_hdr->wlen);
        if (d->strip_crc)
            len -= 4;

        caplen = rlen;
        caplen -= dag_record_size;
        caplen -= (8 * ext_hdr_num);
        caplen -= 2;

        if (caplen > ring->caplen)
            caplen = ring->caplen;
        if (caplen > len)
            caplen = len;
        if ((buffer_len > 0) && (caplen > buffer_len))
            caplen = buffer_len;

        payload += 2;
        break;
    default:
#ifdef DAG_DEBUG
        printf("Warning: unhandled ERF type\n");
#endif
        goto check_and_poll;
    }

    if (buffer_len > 0) {
        if (*buffer != NULL && caplen > 0)
            memcpy(*buffer, payload, caplen);
    } else
        *buffer = payload;

    hdr->caplen = caplen;
    hdr->len = len;

    /* computing timestamp as from DAG docs */
    ts = erf_hdr->ts;
    hdr->ts.tv_sec = ts >> 32;
    ts = (ts & 0xffffffffULL) * 1000000;
    ts += 0x80000000; /* rounding */
    hdr->ts.tv_usec = ts >> 32;
    if (hdr->ts.tv_usec >= 1000000) {
        hdr->ts.tv_usec -= 1000000;
        hdr->ts.tv_sec++;
    }

#ifdef PFRING_DAG_PARSE_PKT
    parse_pkt(*buffer, hdr);
#else
    hdr->extended_hdr.parsed_header_len = 0;
#endif

    d->stats_recv++;

    retval = 1;

exit:
    if (ring->reentrant)
        pthread_spin_unlock(&ring->spinlock);

    return retval;
}
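/*
 * Illustration only: an ERF timestamp is a 64-bit fixed-point value, seconds in
 * the upper 32 bits and a binary fraction of a second (units of 2^-32 s) in the
 * lower 32 bits. The helper below repeats the conversion used above as a
 * standalone function; erf_ts_to_timeval is a hypothetical name.
 */
#include <stdint.h>
#include <sys/time.h>

static void erf_ts_to_timeval(uint64_t erf_ts, struct timeval *tv)
{
    uint64_t frac = erf_ts & 0xffffffffULL;

    tv->tv_sec = (time_t)(erf_ts >> 32);

    /* frac / 2^32 seconds -> microseconds, rounded to nearest */
    frac = frac * 1000000ULL + 0x80000000ULL;
    tv->tv_usec = (suseconds_t)(frac >> 32);

    if (tv->tv_usec >= 1000000) {     /* rounding can push us into the next second */
        tv->tv_usec -= 1000000;
        tv->tv_sec++;
    }
}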
static void handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
    struct task_gen_server *task = (struct task_gen_server *)tbase;
    struct pkt_tuple pkt_tuple[MAX_PKT_BURST];
    uint8_t out[MAX_PKT_BURST];
    struct l4_meta l4_meta[MAX_PKT_BURST];
    struct bundle_ctx *conn;
    int ret;

    for (uint16_t j = 0; j < n_pkts; ++j) {
        if (parse_pkt(mbufs[j], &pkt_tuple[j], &l4_meta[j]))
            plogdx_err(mbufs[j], "Unknown packet, parsing failed\n");
    }

    /* Main proc loop */
    for (uint16_t j = 0; j < n_pkts; ++j) {
        conn = NULL;
        ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple[j]);

        if (ret >= 0)
            conn = task->bundle_ctx_pool.hash_entries[ret];

        /* If not part of existing connection, try to create a connection */
        if (NULL == conn) {
            struct new_tuple nt;
            nt.dst_addr = pkt_tuple[j].dst_addr;
            nt.proto_id = pkt_tuple[j].proto_id;
            nt.dst_port = pkt_tuple[j].dst_port;
            rte_memcpy(nt.l2_types, pkt_tuple[j].l2_types, sizeof(nt.l2_types));
            const struct bundle_cfg *n;

            if (NULL != (n = server_accept(task, &nt))) {
                conn = bundle_ctx_pool_get(&task->bundle_ctx_pool);
                if (!conn) {
                    out[j] = NO_PORT_AVAIL;
                    plogx_err("No more free bundles to accept new connection\n");
                    continue;
                }
                ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple[j]);

                if (ret < 0) {
                    out[j] = NO_PORT_AVAIL;
                    bundle_ctx_pool_put(&task->bundle_ctx_pool, conn);
                    plog_err("Adding key failed while trying to accept connection\n");
                    continue;
                }

                task->bundle_ctx_pool.hash_entries[ret] = conn;

                bundle_init(conn, n, task->heap, PEER_SERVER, &task->seed);
                conn->tuple = pkt_tuple[j];

                if (conn->ctx.stream_cfg->proto == IPPROTO_TCP)
                    task->l4_stats.tcp_created++;
                else
                    task->l4_stats.udp_created++;
            }
        }

        /* bundle contains either an active connection or a newly created
           connection. If it is NULL, then not listening. */
        if (NULL != conn) {
            int ret = bundle_proc_data(conn, mbufs[j], &l4_meta[j], &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
            out[j] = ret == 0 ? 0 : NO_PORT_AVAIL;
        }
        else {
            plog_err("Packet received for service that does not exist\n");
            pkt_tuple_debug(&pkt_tuple[j]);
            plogd_dbg(mbufs[j], NULL);
            out[j] = NO_PORT_AVAIL;
        }
    }
    conn = NULL;

    task->base.tx_pkt(&task->base, mbufs, n_pkts, out);

    if (!(task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)))
        return;

    if (task->n_new_mbufs < MAX_PKT_BURST) {
        if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
            return;
        }

        for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
            init_mbuf_seg(task->new_mbufs[i]);
        }
        task->n_new_mbufs = MAX_PKT_BURST;
    }

    if (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)) {
        uint16_t n_called_back = 0;
        while (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap) && n_called_back < MAX_PKT_BURST) {
            conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

            /* handle packet TX (retransmit or delayed transmit) */
            ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

            if (ret == 0) {
                out[n_called_back] = 0;
                n_called_back++;
            }
        }

        task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
        task->n_new_mbufs -= n_called_back;
    }
}
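/*
 * Illustration only: the server and client handlers above poll a min-heap of
 * connections keyed by a TSC deadline and service at most MAX_PKT_BURST of the
 * expired ones per call (retransmits and delayed transmits). Below is a generic,
 * self-contained sketch of that pattern; the heap here stores bare deadlines and
 * all names (heap_push, heap_pop_min, service_expired) are made up.
 */
#include <stdint.h>

#define MAX_EVENTS   1024
#define BURST_LIMIT  64

static uint64_t heap[MAX_EVENTS];  /* binary min-heap of deadlines (TSC values) */
static unsigned heap_n;

static void heap_push(uint64_t deadline)
{
    if (heap_n == MAX_EVENTS)
        return;                                      /* sketch: silently drop */
    unsigned i = heap_n++;
    heap[i] = deadline;
    while (i && heap[(i - 1) / 2] > heap[i]) {       /* sift up */
        uint64_t tmp = heap[i]; heap[i] = heap[(i - 1) / 2]; heap[(i - 1) / 2] = tmp;
        i = (i - 1) / 2;
    }
}

static uint64_t heap_pop_min(void)
{
    uint64_t min = heap[0];
    heap[0] = heap[--heap_n];
    for (unsigned i = 0;;) {                         /* sift down */
        unsigned l = 2 * i + 1, r = l + 1, s = i;
        if (l < heap_n && heap[l] < heap[s]) s = l;
        if (r < heap_n && heap[r] < heap[s]) s = r;
        if (s == i) break;
        uint64_t tmp = heap[i]; heap[i] = heap[s]; heap[s] = tmp;
        i = s;
    }
    return min;
}

/* Service at most BURST_LIMIT events whose deadline has passed; returns count. */
static unsigned service_expired(uint64_t now)
{
    unsigned handled = 0;
    while (heap_n && heap[0] <= now && handled < BURST_LIMIT) {
        (void)heap_pop_min();  /* retransmit / delayed transmit would go here */
        handled++;
    }
    return handled;
}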
static void handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
    struct task_gen_client *task = (struct task_gen_client *)tbase;
    uint8_t out[MAX_PKT_BURST] = {0};
    struct bundle_ctx *conn;
    int ret;

    if (n_pkts) {
        for (int i = 0; i < n_pkts; ++i) {
            struct pkt_tuple pt;
            struct l4_meta l4_meta;

            if (parse_pkt(mbufs[i], &pt, &l4_meta)) {
                plogdx_err(mbufs[i], "Parsing failed\n");
                out[i] = NO_PORT_AVAIL;
                continue;
            }

            ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt);
            if (ret < 0) {
                plogx_dbg("Client: packet RX that does not belong to connection:"
                          "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n",
                          IPv4_BYTES(((uint8_t*)&pt.dst_addr)), rte_bswap16(pt.dst_port),
                          IPv4_BYTES(((uint8_t*)&pt.src_addr)), rte_bswap16(pt.src_port));
                plogdx_dbg(mbufs[i], NULL);

                // if tcp, send RST
                /* pkt_tuple_debug2(&pt); */
                out[i] = NO_PORT_AVAIL;
                continue;
            }

            conn = task->bundle_ctx_pool.hash_entries[ret];
            ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
            out[i] = ret == 0 ? 0 : NO_PORT_AVAIL;
        }
        task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
    }

    if (task->n_new_mbufs < MAX_PKT_BURST) {
        if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
            plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST);
            return;
        }

        for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
            init_mbuf_seg(task->new_mbufs[i]);
        }
        task->n_new_mbufs = MAX_PKT_BURST;
    }

    /* If there is at least one callback to handle, handle at most MAX_PKT_BURST */
    if (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap)) {
        uint16_t n_called_back = 0;
        while (task->heap->n_elems && rte_rdtsc() > heap_peek_prio(task->heap) && n_called_back < MAX_PKT_BURST) {
            conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap));

            /* handle packet TX (retransmit or delayed transmit) */
            ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);

            if (ret == 0) {
                out[n_called_back] = 0;
                n_called_back++;
            }
        }
        plogx_dbg("During callback, will send %d packets\n", n_called_back);

        task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out);
        task->n_new_mbufs -= n_called_back;
    }

    int n_new = task->bundle_ctx_pool.n_free_bundles;
    n_new = n_new > MAX_PKT_BURST ? MAX_PKT_BURST : n_new;
    if (n_new == 0)
        return;

    if (task->n_new_mbufs < MAX_PKT_BURST) {
        if (rte_mempool_get_bulk(task->mempool, (void **)task->new_mbufs, MAX_PKT_BURST - task->n_new_mbufs) < 0) {
            plogx_err("4Mempool alloc failed %d\n", MAX_PKT_BURST);
            return;
        }

        for (uint32_t i = 0; i < MAX_PKT_BURST - task->n_new_mbufs; ++i) {
            init_mbuf_seg(task->new_mbufs[i]);
        }
        task->n_new_mbufs = MAX_PKT_BURST;
    }

    for (int i = 0; i < n_new; ++i) {
        /* Select a new bundle_cfg according to imix */
        int32_t ret = cdf_sample(task->cdf, &task->seed);
        struct bundle_cfg *bundle_cfg = &task->bundle_cfgs[ret];
        struct bundle_ctx *bundle_ctx;

        bundle_ctx = bundle_ctx_pool_get(&task->bundle_ctx_pool);

        /* Should be an assert: */
        if (!bundle_ctx) {
            plogx_err("No more available bundles\n");
            exit(-1);
        }

        struct pkt_tuple *pt = &bundle_ctx->tuple;

        int n_retries = 0;
        do {
            /* Note that the actual packet sent will contain
               swapped addresses and ports (i.e. pkt.src <=>
               tuple.dst). The incoming packet will match this
               struct. */
            bundle_init(bundle_ctx, bundle_cfg, task->heap, PEER_CLIENT, &task->seed);

            ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt);
            if (n_retries == 1000) {
                plogx_err("Already tried 1K times\n");
            }
            if (ret >= 0) {
                n_retries++;
            }
        } while (ret >= 0);

        ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt);

        if (ret < 0) {
            plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles);
            bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx);

            pkt_tuple_debug2(pt);
            out[i] = NO_PORT_AVAIL;
            continue;
        }

        task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx;

        if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP)
            task->l4_stats.tcp_created++;
        else
            task->l4_stats.udp_created++;

        ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats);
        out[i] = ret == 0 ? 0 : NO_PORT_AVAIL;
    }

    task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out);
    task->n_new_mbufs -= n_new;
}
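/*
 * Illustration only: the client task above picks a bundle_cfg per new connection
 * with cdf_sample(), i.e. it draws from a discrete distribution so that the
 * traffic mix (imix) follows the configured weights. A minimal inverse-CDF sketch
 * with hypothetical names (mix_cdf, mix_pick):
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Cumulative weights in percent; entry i covers configs 0..i (60%, 30%, 10%). */
static const uint32_t mix_cdf[] = { 60, 90, 100 };

static int mix_pick(unsigned int *seed)
{
    uint32_t r = (uint32_t)(rand_r(seed) % 100);     /* uniform in [0, 100) */

    for (size_t i = 0; i < sizeof(mix_cdf) / sizeof(mix_cdf[0]); ++i) {
        if (r < mix_cdf[i])
            return (int)i;                           /* index of chosen config */
    }
    return (int)(sizeof(mix_cdf) / sizeof(mix_cdf[0]) - 1);
}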
/* MySQL will send N amount of logical packets in one physical packet.
   Each logical packet starts with a MySQL header which says how long that
   logical pkt is minus the header itself (m->pkt_length). Along with the
   total length of captured MySQL data from libpcap (total_len), we can
   separate all the logical pkts even though they all vary in length.
*/
int multi_pkts(const u_char *pkts, u_int total_len)
{
    int   retval   = PKT_UNKNOWN_STATE;
    u_int i        = 0;
    u_int used_len = 0;
    struct mysql_hdr *m;

    // If last pkt was fragmented, merge with current pkts
    if (tag->pkt_fragment) {
        tag->pkt_fragment = 0;

        printf("\n\t::FRAGMENT START::\n\t");

        if (buff_frag)
            buff_frag = (u_char *)realloc(buff_frag, tag->frag_len + total_len);
        else
            buff_frag = (u_char *)malloc(tag->frag_len + total_len);

        memcpy(buff_frag, tag->frag, tag->frag_len);
        memcpy(buff_frag + tag->frag_len, pkts, total_len);

        pkts = buff_frag;
        total_len += tag->frag_len;
    }

    while (1) {
        m = (struct mysql_hdr *)pkts; // Packet header
        pkts += 4;                    // First data byte
        i += 4;

        // Check if pkts > len of pkts actually received (last pkt is fragmented)
        used_len = used_len + m->pkt_length + 4;
        if (used_len > total_len) {
            tag->pkt_fragment = 1;
            tag->frag_len = m->pkt_length - (used_len - total_len) + 4;

            pkts -= 4;

            if (tag->frag)
                tag->frag = (u_char *)realloc(tag->frag, tag->frag_len);
            else
                tag->frag = (u_char *)malloc(tag->frag_len);

            memcpy(tag->frag, pkts, tag->frag_len);

            printf("::FRAGMENT END::\n");

            retval = PKT_FRAGMENTED;
            break;
        }

        tag->current_pkt_id = m->pkt_id;

        if (!op.no_myhdrs)
            printf("ID %u len %u ", m->pkt_id, m->pkt_length);

        total_mysql_pkts++;
        total_mysql_bytes = total_mysql_bytes + 4 + m->pkt_length;

        if (m->pkt_length) {
            memcpy(buff, pkts, m->pkt_length);
            retval = parse_pkt(buff, m->pkt_length);
        }
        else
            printf("ID %u Zero-length MySQL packet ", m->pkt_id);

        printf("\n");

        tag->last_pkt_id = m->pkt_id;

        if ((i + m->pkt_length) >= total_len)
            break; // No more pkts

        pkts += m->pkt_length; // Next pkt header
        i += m->pkt_length;

        if (retval == PKT_PARSED_OK)
            tag->last_origin = tag->current_origin;

        printf("\t");
    }

    return retval;
}
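/*
 * Illustration only: the 4-byte MySQL packet header that struct mysql_hdr maps
 * onto is a 3-byte little-endian payload length followed by a 1-byte sequence
 * id. Reading it byte-by-byte avoids relying on bitfield layout; the names
 * (my_hdr, read_my_hdr) are made up for the example.
 */
#include <stdint.h>

struct my_hdr {
    uint32_t payload_len;   /* length of the logical packet, header excluded */
    uint8_t  seq_id;        /* wraps at 255; 0 starts a new command          */
};

/* Parse the header at `p`; caller must guarantee at least 4 readable bytes. */
static struct my_hdr read_my_hdr(const uint8_t *p)
{
    struct my_hdr h;

    h.payload_len = (uint32_t)p[0]
                  | ((uint32_t)p[1] << 8)
                  | ((uint32_t)p[2] << 16);   /* 3-byte little-endian length */
    h.seq_id = p[3];
    return h;
}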