/*
 * Send a frame through a VLAN interface and verify the frame that lands on
 * the physical device's output queue carries dev_vlan's VLAN id while every
 * other byte is unchanged.
 */
static void test_send_frame_vlan_to_vlan(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	/* NOTE(review): assumes sizeof(test_frame_vlan) <= 144 -- confirm */
	uint8_t check_buf[144];
	int res;

	if (create_odp_packet_ip4(&pkt, test_frame_vlan,
				  sizeof(test_frame_vlan), 0)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	/* Build the expected frame: identical bytes except byte 15, which is
	 * patched to dev_vlan's VLAN id (low byte of the TCI in this fixed
	 * test frame). */
	memcpy(check_buf, test_frame_vlan, sizeof(test_frame_vlan));
	check_buf[15] = dev_vlan->vlan;

	res = ofp_send_frame(dev_vlan, pkt);
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	/* The VLAN interface transmits via the underlying physical device,
	 * so the frame is dequeued from dev, not dev_vlan. */
	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);

	pkt = odp_packet_from_event(ev);
	CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt), sizeof(test_frame_vlan));

	if (memcmp(odp_packet_l2_ptr(pkt, NULL), check_buf,
		   sizeof(test_frame_vlan)))
		CU_FAIL("Frame data mismatch.");
}
/*
 * Feed a frame whose destination IP matches no local address and has no
 * route: ofp_packet_input() must forward the packet, unmodified, to the
 * slow path queue; the output queue must stay empty.
 */
static void test_ofp_packet_input_to_sp(void)
{
	odp_packet_t pkt;
	odp_event_t ev;
	int res;

	my_test_val = TEST_FORWARD_HOOK;
	/* Call ofp_packet_input using a pkt with destination ip
	 * that does NOT match the local ip on ifnet and NO route is found.
	 * The packet is forwarded to slow path queue. */

	if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame),
				  0, 0)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	res = ofp_packet_input(pkt, interface_queue[port],
			       ofp_eth_vlan_processing);

	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	/* Exactly one event on the slow path queue, nothing anywhere else. */
	CU_ASSERT_NOT_EQUAL(ev = odp_queue_deq(ifnet->spq_def),
			    ODP_EVENT_INVALID);
	CU_ASSERT_EQUAL(odp_queue_deq(ifnet->spq_def), ODP_EVENT_INVALID);
	CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);

	/* Payload delivered to slow path must be byte-identical. */
	if (memcmp(odp_packet_data(odp_packet_from_event(ev)),
		   in_pkt_data, sizeof(test_frame)))
		CU_FAIL("corrupt data sent to slow path");

	odp_packet_free(odp_packet_from_event(ev));
	CU_PASS("ofp_packet_input_to_sp");
}
static void check_lun_is_wiped(unsigned char *buf, uint64_t lba) { unsigned char *rbuf = alloca(256 * block_size); READ16(sd, NULL, lba, 256 * block_size, block_size, 0, 0, 0, 0, 0, rbuf, EXPECT_STATUS_GOOD); if (rc16 == NULL) { return; } if (rc16->lbprz) { logging(LOG_VERBOSE, "LBPRZ==1 All blocks " "should read back as 0"); if (all_zero(rbuf, 256 * block_size) == 0) { logging(LOG_NORMAL, "[FAILED] Blocks did not " "read back as zero"); CU_FAIL("[FAILED] Blocks did not read back " "as zero"); } else { logging(LOG_VERBOSE, "[SUCCESS] Blocks read " "back as zero"); } } else { logging(LOG_VERBOSE, "LBPRZ==0 Blocks should not read back as " "all 'a' any more"); if (!memcmp(buf, rbuf, 256 * block_size)) { logging(LOG_NORMAL, "[FAILED] Blocks were not wiped"); CU_FAIL("[FAILED] Blocks were not wiped"); } else { logging(LOG_VERBOSE, "[SUCCESS] Blocks were wiped"); } } }
/* @private Handle a received (timeout) event.
 *
 * Validates every piece of timeout metadata against the per-timer state in
 * the test_timer record carried as the timeout's user pointer:
 *  - @stale distinguishes timeouts from cancelled/expired timers (must not
 *    be "fresh", local tick must be TICK_INVALID) from live ones (must be
 *    "fresh", tick must match, and must not arrive before it was due).
 *  - @prev_tick is the tick of the previously handled timeout; an earlier
 *    tick is counted in ndelivtoolate rather than failed, since late
 *    delivery is legal.
 * On success the event is parked in ttp->ev for the caller to reuse/free. */
static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
{
	CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); /* Internal error */
	if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
		/* Not a timeout event */
		CU_FAIL("Unexpected event type received");
		return;
	}
	/* Read the metadata from the timeout */
	odp_timeout_t tmo = odp_timeout_from_event(ev);
	odp_timer_t tim = odp_timeout_timer(tmo);
	uint64_t tick = odp_timeout_tick(tmo);
	struct test_timer *ttp = odp_timeout_user_ptr(tmo);

	/* Cross-check the timeout metadata against the test bookkeeping. */
	if (tim == ODP_TIMER_INVALID)
		CU_FAIL("odp_timeout_timer() invalid timer");
	if (!ttp)
		CU_FAIL("odp_timeout_user_ptr() null user ptr");
	if (ttp && ttp->ev2 != ev)
		CU_FAIL("odp_timeout_user_ptr() wrong user ptr");
	if (ttp && ttp->tim != tim)
		CU_FAIL("odp_timeout_timer() wrong timer");

	if (stale) {
		if (odp_timeout_fresh(tmo))
			CU_FAIL("Wrong status (fresh) for stale timeout");
		/* Stale timeout => local timer must have invalid tick */
		if (ttp && ttp->tick != TICK_INVALID)
			CU_FAIL("Stale timeout for active timer");
	} else {
		if (!odp_timeout_fresh(tmo))
			CU_FAIL("Wrong status (stale) for fresh timeout");
		/* Fresh timeout => local timer must have matching tick */
		if (ttp && ttp->tick != tick) {
			LOG_DBG("Wrong tick: expected %" PRIu64
				" actual %" PRIu64 "\n",
				ttp->tick, tick);
			CU_FAIL("odp_timeout_tick() wrong tick");
		}
		/* Check that timeout was delivered 'timely' */
		if (tick > odp_timer_current_tick(tp))
			CU_FAIL("Timeout delivered early");
		if (tick < prev_tick) {
			LOG_DBG("Too late tick: %" PRIu64
				" prev_tick %" PRIu64"\n",
				tick, prev_tick);
			/* We don't report late timeouts using CU_FAIL */
			odp_atomic_inc_u32(&ndelivtoolate);
		}
	}

	if (ttp) {
		/* Internal error: the slot must be empty before we park ev. */
		CU_ASSERT_FATAL(ttp->ev == ODP_EVENT_INVALID);
		ttp->ev = ev;
	}
}
/*
 * Tests
 */

/*
 * Route a packet to a GRE tunnel's point-to-point address: ofp_ip_output()
 * must encapsulate it (outer IPv4 header + 4-byte GRE header prepended),
 * rewrite the Ethernet addresses for the tunnel's next hop, and queue the
 * result on the physical device with the original IP packet intact inside.
 */
static void test_packet_output_gre(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	int res;
	struct ofp_ether_header *eth;
	struct ofp_ip *ip;
	struct ofp_ip *ip_orig;
	struct ofp_greip *greip;

	if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame),
				  tun_p2p)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	/*
	 * Packet's destination is GRE tunnel's p2p address, next hop is GRE
	 * interface. GRE+IP header is prepended. Packet's new destination is
	 * link local. Packet is put into output queue.
	 */
	res = ofp_ip_output(pkt, NULL);
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	res = ofp_send_pending_pkt();
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_NOT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);

	pkt = odp_packet_from_event(ev);
	/* 20 bytes of outer IPv4 header + 4 bytes of GRE header added. */
	CU_ASSERT_EQUAL_FATAL(odp_packet_len(pkt),
			      sizeof(test_frame) + 20 + 4);

	/* Ethernet header rewritten for the tunnel next hop. */
	eth = odp_packet_l2_ptr(pkt, NULL);
	if (memcmp(eth->ether_dhost, tun_rem_mac, OFP_ETHER_ADDR_LEN))
		CU_FAIL("Bad destination mac address.");
	if (memcmp(eth->ether_shost, dev->mac, OFP_ETHER_ADDR_LEN))
		CU_FAIL("Bad source mac address.");

	/* Outer IP header: local address -> tunnel remote, protocol GRE. */
	ip = odp_packet_l3_ptr(pkt, NULL);
	CU_ASSERT_EQUAL(ip->ip_src.s_addr, dev_ip);
	CU_ASSERT_EQUAL(ip->ip_dst.s_addr, tun_rem_ip);
	CU_ASSERT_EQUAL(ip->ip_p, OFP_IPPROTO_GRE);

	/* Bare GRE header: no flags, payload type IPv4. */
	greip = (struct ofp_greip *)ip;
	CU_ASSERT_EQUAL(greip->gi_g.flags, 0);
	CU_ASSERT_EQUAL(greip->gi_g.ptype,
			odp_cpu_to_be_16(OFP_ETHERTYPE_IP));

	/* inner ip */
	ip = (struct ofp_ip *)(greip + 1);
	ip_orig = (struct ofp_ip *)(&orig_pkt_data[OFP_ETHER_HDR_LEN]);
	if (memcmp(ip, ip_orig, odp_be_to_cpu_16(ip_orig->ip_len)))
		CU_FAIL("Inner IP packet error.");
}
/*
 * Verify that WRITESAME10-with-UNMAP availability is consistent with the
 * logical block provisioning VPD page: if the command is not implemented
 * LBPWS10 must be clear; if it is implemented both LBPME and LBPWS10 must
 * be set.
 */
void test_writesame10_unmap_vpd(void)
{
	int ret;
	unsigned char *buf = alloca(block_size);

	logging(LOG_VERBOSE, LOG_BLANK_LINE);
	logging(LOG_VERBOSE, "Test WRITESAME10 UNMAP availability is "
		"consistent with VPD settings");

	CHECK_FOR_DATALOSS;
	CHECK_FOR_SBC;

	/* Probe support by unmapping a single block with an all-zero buffer. */
	logging(LOG_VERBOSE, "Check if WRITESAME10 can be used for UNMAP.");
	logging(LOG_VERBOSE, "Unmap 1 block using WRITESAME10");
	memset(buf, 0, block_size);
	ret = writesame10(sd, 0, block_size, 1, 0, 1, 0, 0, buf,
			  EXPECT_STATUS_GOOD);
	if (ret != 0) {
		logging(LOG_VERBOSE, "WRITESAME10 UNMAP is not available. "
			"Verify that VPD settings reflect this.");
		logging(LOG_VERBOSE, "Verify that LBPWS10 is clear.");
		if (inq_lbp && inq_lbp->lbpws10) {
			logging(LOG_NORMAL, "[FAILED] WRITESAME10 UNMAP is not "
				"implemented but LBPWS10 is set");
			CU_FAIL("[FAILED] WRITESAME10 UNMAP is unavailable but "
				"LBPWS10==1");
		} else {
			logging(LOG_VERBOSE, "[SUCCESS] LBPWS10 is clear.");
		}
	} else {
		logging(LOG_VERBOSE, "WRITESAME10 UNMAP is available. Verify "
			"that VPD settings reflect this.");

		logging(LOG_VERBOSE, "Verify that LBPME is set.");
		if (rc16 && rc16->lbpme) {
			logging(LOG_VERBOSE, "[SUCCESS] LBPME is set.");
		} else {
			logging(LOG_NORMAL, "[FAILED] WRITESAME10 UNMAP is "
				"implemented but LBPME is not set");
			CU_FAIL("[FAILED] UNMAP is available but LBPME==0");
		}

		logging(LOG_VERBOSE, "Verify that LBPWS10 is set.");
		if (inq_lbp && inq_lbp->lbpws10) {
			logging(LOG_VERBOSE, "[SUCCESS] LBPWS10 is set.");
		} else {
			logging(LOG_NORMAL, "[FAILED] WRITESAME10 UNMAP is "
				"implemented but LBPWS10 is not set");
			CU_FAIL("[FAILED] UNMAP is available but LBPWS10==0");
		}
	}
}
void test_report_supported_opcodes_servactv(void) { int i, ret; struct scsi_task *rso_task; struct scsi_report_supported_op_codes *rsoc; logging(LOG_VERBOSE, LOG_BLANK_LINE); logging(LOG_VERBOSE, "Test READ_SUPPORTED_OPCODES SERVACTV flag"); ret = report_supported_opcodes( sd, &rso_task, 0, SCSI_REPORT_SUPPORTING_OPS_ALL, 0, 0, 65535, EXPECT_STATUS_GOOD); if (ret == -2) { logging(LOG_NORMAL, "[SKIPPED] READ_SUPPORTED_OPCODES is not " "implemented."); CU_PASS("READ_SUPPORTED_OPCODES is not implemented."); scsi_free_scsi_task(rso_task); return; } CU_ASSERT_EQUAL(ret, 0); if (ret != 0) { scsi_free_scsi_task(rso_task); return; } logging(LOG_VERBOSE, "Unmarshall the DATA-IN buffer"); rsoc = scsi_datain_unmarshall(rso_task); CU_ASSERT_NOT_EQUAL(rsoc, NULL); if (!rsoc) { logging(LOG_NORMAL, "[FAILED] Target did not return any data " "for ReportSupportedOpcodes\n"); CU_FAIL("Target did not return any data for " "ReportSupportedOpcodes"); return; } logging(LOG_VERBOSE, "Verify that when SERVACTV is clear then " "ServiceAction must be zero."); for (i = 0; i < rsoc->num_descriptors; i++) { if (!rsoc->descriptors[i].servactv && rsoc->descriptors[i].sa) { logging(LOG_NORMAL, "[FAILED] ServiceAction is " "non-zero but SERVACTV is clear"); CU_FAIL("[FAILED] ServiceAction is " "non-zero but SERVACTV is clear"); } } scsi_free_scsi_task(rso_task); }
void test_conf_parse_file(void) { /* Writes test content in test configuration file */ char path[256] = CONF_TEST_PATH; // printf("%s", __FILE__); // sprintf(path, "%s/../%s", dirname(__FILE__), CONF_TEST_PATH); // sprintf(path, "%s/../%s", dirname(__FILE__), CONF_TEST_PATH); FILE *fp = fopen(path, "w"); if (NULL == fp) { perror("fopen()"); CU_FAIL("could not create test config file"); return; } int octets = fprintf(fp, "%s", CONF_TEST_FILE); if (octets != strlen(CONF_TEST_FILE)) { CU_FAIL("test content could not be copied in test file"); return; } fclose(fp); /* Tests parsed configuration */ service_list_t* list = conf_parse_file(path); CU_ASSERT_PTR_NOT_NULL_FATAL(list); CU_ASSERT_EQUAL(list->count, 4); service_t* srv = service_list_lookup(list, "SRV12"); CU_ASSERT_PTR_NOT_NULL_FATAL(srv); CU_ASSERT_STRING_EQUAL(srv->warn, "1"); CU_ASSERT_STRING_EQUAL(srv->crit, "2"); srv = service_list_lookup(list, "SRV34"); CU_ASSERT_PTR_NOT_NULL_FATAL(srv); CU_ASSERT_PTR_NOT_NULL(srv); CU_ASSERT_STRING_EQUAL(srv->warn, "3"); CU_ASSERT_STRING_EQUAL(srv->crit, "4"); srv = service_list_lookup(list, "SRV56"); CU_ASSERT_PTR_NOT_NULL_FATAL(srv); CU_ASSERT_STRING_EQUAL(srv->warn, "5"); CU_ASSERT_STRING_EQUAL(srv->crit, "6"); srv = service_list_lookup(list, "SRV78"); CU_ASSERT_PTR_NOT_NULL_FATAL(srv); CU_ASSERT_STRING_EQUAL(srv->warn, "a"); CU_ASSERT_STRING_EQUAL(srv->crit, "-1"); service_list_free(list); }
// @filter_index:test_filter_index_2 => [filter_index ne libère pas la mémoire des éléments qu'elle supprime.] void test_filter_index_2(void) { Entry *ent1 = (Entry*) malloc(sizeof(Entry)); Entry *ent2 = (Entry*) malloc(sizeof(Entry)); Entry *ent3 = (Entry*) malloc(sizeof(Entry)); Entry *ent4 = (Entry*) malloc(sizeof(Entry)); Entry *ent5 = (Entry*) malloc(sizeof(Entry)); if (ent1 == NULL || ent2 == NULL || ent3 == NULL || ent4 == NULL || ent5 == NULL) { CU_FAIL("La mémoire n'a pas pu être allouée pour le test test_filter_index_2."); return; } *ent1 = (Entry){"lorem", 2, ent2}; *ent2 = (Entry){"ipsum", 2, ent3}; *ent3 = (Entry){"dolor", 1, ent4}; *ent4 = (Entry){"sit", 1, ent5}; *ent5 = (Entry){"amet", 1, NULL}; free_count = 0; filter_index(&ent1,2); CU_ASSERT_EQUAL(free_count,3); free(ent1); free(ent2); }
void pktio_test_inq_remdef(void) { odp_pktio_t pktio; odp_queue_t inq; odp_event_t ev; uint64_t wait; int i; pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_SEND); CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID); CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_POLL) == 0); inq = odp_pktio_inq_getdef(pktio); CU_ASSERT(inq != ODP_QUEUE_INVALID); CU_ASSERT(odp_pktio_inq_remdef(pktio) == 0); wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS); for (i = 0; i < 100; i++) { ev = odp_schedule(NULL, wait); if (ev != ODP_EVENT_INVALID) { odp_event_free(ev); CU_FAIL("received unexpected event"); } } CU_ASSERT(odp_queue_destroy(inq) == 0); CU_ASSERT(odp_pktio_close(pktio) == 0); }
/*
 * A frame destined to the interface's own IP must be consumed by the
 * registered local UDPv4 hook; nothing may reach the slow path or the
 * output queue.
 */
static void test_ofp_packet_input_local_UDPv4_hook(void)
{
	odp_packet_t pkt;
	int res;

	/* Call ofp_packet_input with a pkt with destination ip
	 * that matches the local ip on ifnet.
	 * The packet is terminated in local UDPv4 hook */
	my_test_val = TEST_LOCAL_UDPv4_HOOK;
	ifnet->ip_addr = dst_ipaddr;

	if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame),
				  dst_ipaddr, 0)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	res = ofp_packet_input(pkt, interface_queue[port],
			       ofp_eth_vlan_processing);

	CU_ASSERT_EQUAL(res, OFP_TEST_LOCAL_UDPv4_HOOK);
#ifdef SP
	CU_ASSERT_EQUAL(odp_queue_deq(ifnet->spq_def), ODP_EVENT_INVALID);
#endif /* SP */
	CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);

	/* Restore the interface address for subsequent tests. */
	ifnet->ip_addr = 0;
	CU_PASS("ofp_packet_input_local_UDPv4_hook");
}
/*
 * Detach and destroy the default input queue of a pktio interface.
 * Any events still pending on the queue are drained and freed first.
 * Returns 0 on success, -1 if the pktio has no valid default inq.
 */
static int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t queue;
	odp_queue_type_t type;

	queue = odp_pktio_inq_getdef(pktio);
	if (queue == ODP_QUEUE_INVALID) {
		CU_FAIL("attempting to destroy invalid inq");
		return -1;
	}

	CU_ASSERT(odp_pktio_inq_remdef(pktio) == 0);

	type = odp_queue_type(queue);

	/* Flush any pending events: poll queues are dequeued directly,
	 * scheduled queues are drained through the scheduler. */
	for (;;) {
		odp_event_t ev;

		if (type == ODP_QUEUE_TYPE_POLL)
			ev = odp_queue_deq(queue);
		else
			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev == ODP_EVENT_INVALID)
			break;

		odp_event_free(ev);
	}

	return odp_queue_destroy(queue);
}
void pktio_test_send_on_ronly(void) { odp_pktio_t pktio; odp_packet_t pkt; int ret; pktio = create_pktio(0, ODP_PKTIN_MODE_RECV, ODP_PKTOUT_MODE_DISABLED); if (pktio == ODP_PKTIO_INVALID) { CU_FAIL("failed to open pktio"); return; } ret = odp_pktio_start(pktio); CU_ASSERT_FATAL(ret == 0); pkt = odp_packet_alloc(default_pkt_pool, packet_len); CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID) pktio_init_packet(pkt); ret = odp_pktio_send(pktio, &pkt, 1); CU_ASSERT(ret < 0); if (ret <= 0) odp_packet_free(pkt); ret = odp_pktio_stop(pktio); CU_ASSERT_FATAL(ret == 0); ret = odp_pktio_close(pktio); CU_ASSERT_FATAL(ret == 0); }
// @calloc2:test_calloc2_3 => [Votre fonction calloc ne prend pas en compte le fait que malloc puisse échouer.] void test_calloc2_3(void) { let_malloc_fail = 1; //On indique qu'on veut que malloc utilisé par calloc2 échoue // Pour plus de détails sur le fonctionnement des signaux et l'interception des segfaults, // voir mini-projet-string/tests.c if (signal(SIGSEGV, sig_handler) == SIG_ERR) { CU_FAIL("Impossible d'enregistrer un signal handler."); return; } if(setjmp(label_test_calloc2_3)==0) { calloc2(42, 42); } else if { /* IMPORTANT ! On remet let_malloc_fail à 0 pour que CUnit puisse réutiliser malloc par la suite. * Ici, si on ne pense pas à remettre cette variable à 0, CUnit ne prend pas en compte l'échec du test. */ let_malloc_fail = 0; CU_ASSERT_TRUE(0); } /* IMPORTANT ! On remet let_malloc_fail à 0 pour que CUnit puisse réutiliser malloc par la suite. */ let_malloc_fail = 0; signal(SIGSEGV, SIG_DFL); }
/*
 * Receiving on an interface opened send-only (input disabled) must fail.
 */
void pktio_test_recv_on_wonly(void)
{
	odp_packet_t pkt;
	odp_pktio_t id;
	int rc;

	id = create_pktio(0, ODP_PKTIN_MODE_DISABLED, ODP_PKTOUT_MODE_SEND);
	if (id == ODP_PKTIO_INVALID) {
		CU_FAIL("failed to open pktio");
		return;
	}

	rc = odp_pktio_start(id);
	CU_ASSERT_FATAL(rc == 0);

	/* recv must fail; if anything was (unexpectedly) received, free it. */
	rc = odp_pktio_recv(id, &pkt, 1);
	CU_ASSERT(rc < 0);
	if (rc > 0)
		odp_packet_free(pkt);

	rc = odp_pktio_stop(id);
	CU_ASSERT_FATAL(rc == 0);
	rc = odp_pktio_close(id);
	CU_ASSERT_FATAL(rc == 0);
}
/*
 * Detach and destroy a pktio's default input queue, draining any events
 * still pending in the scheduler.  Returns 0 on success, -1 on failure.
 */
int destroy_inq(odp_pktio_t pktio)
{
	odp_event_t ev;
	odp_queue_t queue = odp_pktio_inq_getdef(pktio);

	if (queue == ODP_QUEUE_INVALID) {
		CU_FAIL("attempting to destroy invalid inq");
		return -1;
	}

	if (odp_pktio_inq_remdef(pktio) < 0)
		return -1;

	/* Drain and free whatever the scheduler still holds. */
	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT)) !=
	       ODP_EVENT_INVALID)
		odp_event_free(ev);

	return odp_queue_destroy(queue);
}
/*
 * Run the HMAC-MD5 test vectors: decode key, data and expected digest from
 * hex for each entry (the table is terminated by an empty key) and compare
 * hmac_md5() output against the reference.
 *
 * Fixes: removed the stray unconditional CU_FAIL("Test not implemented")
 * left over from the stub, which failed the test even when every vector
 * passed, and dropped the unused failure counter.
 */
void test_hmac(void)
{
	int x = 0;
	char keystr[100];
	size_t keylen;
	char teststr[100];
	size_t testlen;
	char hmac[100];
	char res[64];
	size_t reslen;

	while (strcmp(test_hmac_data[x].key, "") != 0) {
		keylen = hex_to_char_buf(test_hmac_data[x].key,
					 test_hmac_data[x].keylen,
					 keystr, 100);
		testlen = hex_to_char_buf(test_hmac_data[x].data,
					  test_hmac_data[x].datalen,
					  teststr, 100);
		reslen = hex_to_char_buf(test_hmac_data[x].hmac_result,
					 32, res, 64);
		(void)reslen; /* digest length is fixed by the vector table */

		hmac_md5(keystr, keylen, teststr, testlen, hmac);

		/* NOTE(review): comparing 64 bytes although an MD5 digest is
		 * only 16 -- presumably both buffers carry the same tail;
		 * confirm and consider comparing reslen bytes instead. */
		CU_ASSERT(strncmp(hmac, res, 64) == 0);
		x++;
	}
}
/*
 * Round-trip a sine wave through fft() and ifft() and verify that the
 * reconstructed samples match the original input to within +/- 0.5.
 */
static void test_normal_fft()
{
	double real[FFT_SIZE] = {0};
	double expected[FFT_SIZE] = {0};
	double imag[FFT_SIZE] = {0};
	size_t n;

	/* Fill the real part with one low-frequency sine; imaginary part
	 * stays zero.  Keep a copy for the post-round-trip comparison. */
	for (n = 0; n < ARRAY_SIZE(real); n++) {
		real[n] = sin(((double)n / (double)256) *
			      (double)2.0 * (double)3.14);
		expected[n] = real[n];
		imag[n] = 0;
	}

	fft(real, imag, 8);
	ifft(real, imag, 8);

	for (n = 0; n < ARRAY_SIZE(real); n++) {
		if (fabs(expected[n] - real[n]) > 0.5)
			CU_FAIL("fft input output data not equal");
	}
}
/*
 * Stamp a test sequence number into a packet: a magic+sequence header is
 * written right after the UDP header and a magic tail at the very end of
 * the packet.  Returns the sequence number used, or TEST_SEQ_INVALID if
 * the packet has no L4 offset or the copy-in fails.
 */
static uint32_t pktio_pkt_set_seq(odp_packet_t pkt)
{
	/* Monotonic per-process counter; single-threaded test use assumed. */
	static uint32_t tstseq;
	size_t off;
	pkt_head_t head;
	pkt_tail_t tail;

	off = odp_packet_l4_offset(pkt);
	if (off == ODP_PACKET_OFFSET_INVALID) {
		CU_FAIL("packet L4 offset not set");
		return TEST_SEQ_INVALID;
	}

	head.magic = TEST_SEQ_MAGIC;
	head.seq = tstseq;

	/* Header goes into the UDP payload, just past the UDP header. */
	off += ODPH_UDPHDR_LEN;
	if (odp_packet_copydata_in(pkt, off, sizeof(head), &head) != 0)
		return TEST_SEQ_INVALID;

	/* Tail marker occupies the last sizeof(pkt_tail_t) bytes. */
	tail.magic = TEST_SEQ_MAGIC;
	off = odp_packet_len(pkt) - sizeof(pkt_tail_t);
	if (odp_packet_copydata_in(pkt, off, sizeof(tail), &tail) != 0)
		return TEST_SEQ_INVALID;

	tstseq++;

	return head.seq;
}
/*
 * Iteration callback: compare each string produced by the iterator against
 * the next entry of ctx->expected (a NULL-terminated array).  Once a
 * mismatch has been recorded in ctx->gone_bad, further strings are ignored.
 *
 * Fixes: running past the end of expected[] (when the iterator yields more
 * strings than expected) no longer keeps indexing beyond the NULL
 * terminator, printf no longer receives a NULL "%s" argument (undefined
 * behavior), and CU_FAIL now carries a message instead of an empty
 * argument list.
 */
static void match_strings(const char *actual, void *_ctx)
{
	struct iter_ctx *ctx = _ctx;
	const char *expected;
	int ok;

	if (ctx->gone_bad) {
		return;
	}
	expected = ctx->expected[ctx->count];
	if (expected == NULL) {
		/* More strings than expected: flag and stop advancing past
		 * the end of the expected[] array. */
		ctx->gone_bad = 1;
		ok = 0;
	} else {
		ok = (ctx->gone_bad = strcmp(expected, actual)) == 0;
	}
	printf("\n%s(): expected vs actual (%s):"
	       "\n '%s'"
	       "\n '%s'"
	       "\n", __func__, ok ? "OK" : "NOT OK",
	       expected ? expected : "(null)", actual);
	if (!ok) {
		CU_FAIL("iterated string does not match expected value");
	}
	ctx->count++;
}
/*
 * Parse res/big-a.hex and copy its records into a flat 8 KiB buffer, then
 * verify the 16 bytes of the record at address 0x400.
 *
 * Fixes: malloc() result is now checked before use, and the destination
 * buffer is freed on every path (it was allocated before the file check
 * and leaked unconditionally).
 */
void test_memory_is_copied_1(void)
{
	ihex_recordset_t *rs = ihex_rs_from_file("res/big-a.hex");
	uint8_t *dst;

	if (rs == NULL) {
		CU_FAIL("File \"res/big-a.hex\" does not exist.");
		return;
	}

	dst = malloc(8192);
	if (dst == NULL) {
		CU_FAIL("Could not allocate destination buffer.");
		return;
	}

	ihex_mem_copy(rs, dst, 8192, IHEX_WIDTH_8BIT, IHEX_ORDER_BIGENDIAN);

	// :100400000B 0B 0B 98 B0 2D 0B 0B 0B 88 80 04 00 00 00 00 29
	CU_ASSERT_EQUAL(dst[0x400], 0x0B);
	CU_ASSERT_EQUAL(dst[0x401], 0x0B);
	CU_ASSERT_EQUAL(dst[0x402], 0x0B);
	CU_ASSERT_EQUAL(dst[0x403], 0x98);
	CU_ASSERT_EQUAL(dst[0x404], 0xB0);
	CU_ASSERT_EQUAL(dst[0x405], 0x2D);
	CU_ASSERT_EQUAL(dst[0x406], 0x0B);
	CU_ASSERT_EQUAL(dst[0x407], 0x0B);
	CU_ASSERT_EQUAL(dst[0x408], 0x0B);
	CU_ASSERT_EQUAL(dst[0x409], 0x88);
	CU_ASSERT_EQUAL(dst[0x40A], 0x80);
	CU_ASSERT_EQUAL(dst[0x40B], 0x04);
	CU_ASSERT_EQUAL(dst[0x40C], 0x00);
	CU_ASSERT_EQUAL(dst[0x40D], 0x00);
	CU_ASSERT_EQUAL(dst[0x40E], 0x00);
	CU_ASSERT_EQUAL(dst[0x40F], 0x00);

	free(dst);
	/* NOTE(review): rs is not released -- confirm whether the ihex API
	 * provides a destructor (e.g. ihex_rs_free) and call it here. */
}
/*
 * Local-input hook registered with the fastpath.  The global my_test_val
 * selects the scenario under test and determines the verdict:
 *  - TEST_LOCAL_HOOK:         verify protocol/length/payload, terminate here.
 *  - TEST_LOCAL_HOOK_GRE:     pass on; the decapsulated packet is expected
 *                             next at the forward hook.
 *  - TEST_LOCAL_HOOK_GRE_APP: pass on; with no tunnel configured the packet
 *                             is expected next at the GRE hook.
 *  - TEST_LOCAL_IPv4_HOOK / TEST_LOCAL_UDPv4_HOOK: let processing continue.
 *  - anything else:           flag the test as failed.
 */
static enum ofp_return_code fastpath_local_hook(odp_packet_t pkt, void *arg)
{
	int protocol = *(int *)arg;
	(void) pkt;

	if (my_test_val == TEST_LOCAL_HOOK) {
		CU_ASSERT_EQUAL(protocol, IS_IPV4);
		CU_ASSERT_EQUAL(odp_packet_len(pkt), sizeof(test_frame));
		/* Compare the L3 payload against the injected frame. */
		if (memcmp((uint8_t *)odp_packet_data(pkt) +
			   odp_packet_l3_offset(pkt),
			   in_pkt_data + OFP_ETHER_HDR_LEN,
			   odp_packet_len(pkt) - OFP_ETHER_HDR_LEN))
			CU_FAIL("Corrupt data");
		return OFP_TEST_LOCAL_HOOK;
	} else if (my_test_val == TEST_LOCAL_HOOK_GRE) {
		/* GRE packet is offered to local hook,
		   then after processing to forward hook */
		my_test_val = TEST_FORWARD_HOOK;
		return OFP_PKT_CONTINUE;
	} else if (my_test_val == TEST_LOCAL_HOOK_GRE_APP) {
		/* GRE packet is offered to local hook,
		   then after tunnel is not found to GRE hook */
		my_test_val = TEST_GRE_HOOK;
		return OFP_PKT_CONTINUE;
	} else if (my_test_val == TEST_LOCAL_IPv4_HOOK)
		return OFP_PKT_CONTINUE;
	else if (my_test_val == TEST_LOCAL_UDPv4_HOOK)
		return OFP_PKT_CONTINUE;
	else
		return OFP_TEST_FAIL;
}
/*
 * Route a packet towards the GRE tunnel when no next hop exists for the
 * tunnel destination: ofp_ip_output() must drop it and nothing may appear
 * on the physical device's output queue.
 */
static void test_packet_output_gre_no_nexthop(void)
{
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t ev;
	int res;

	if (create_odp_packet_ip4(&pkt, test_frame, sizeof(test_frame),
				  tun_p2p + 1)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	/*
	 * Packet's destination is GRE tunnel's p2p address, no next hop
	 * is found for tunnel destination address, packet is dropped.
	 */
	res = ofp_ip_output(pkt, NULL);
	CU_ASSERT_EQUAL(res, OFP_PKT_DROP);

	/* NOTE(review): ofp_send_pending_pkt() is still expected to return
	 * OFP_PKT_PROCESSED even though the packet was dropped -- presumably
	 * that is its success code when there is nothing to send; confirm
	 * against the OFP API. */
	res = ofp_send_pending_pkt();
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);

	ev = odp_queue_deq(dev->outq_def);
	CU_ASSERT_EQUAL_FATAL(ev, ODP_EVENT_INVALID);
}
/* test test_tt_setbyte() */ void test_tt_setbyte(void) { int i, rc; int rows = 10; int cols = rows * 8; tag_table *tt = NULL; char *row_ptr, *octet_ptr; char octet1 = (char)0x82; char octet2 = (char)0x91; char octet; /* create the tt object */ rc = tt_create(&tt, rows, cols); CU_ASSERT_EQUAL_FATAL(rc, TT_SUCCESS); CU_ASSERT_PTR_NOT_NULL_FATAL(tt); /* systematically set various bits within the table */ for (i = 0; i < rows; i++) { rc = tt_setbyte(tt, i, i, octet1); if (rc != TT_SUCCESS) CU_FAIL("tt_setbits() returned an error code"); rc = tt_setbyte(tt, i, (i + 1) % 8, octet1); if (rc != TT_SUCCESS) CU_FAIL("tt_setbits() returned an error code"); rc = tt_setbyte(tt, i, (i + 1) % 8, octet2); if (rc != TT_SUCCESS) CU_FAIL("tt_setbits() returned an error code"); rc = tt_setbyte(tt, i, (i + 2) % 8, octet2); if (rc != TT_SUCCESS) CU_FAIL("tt_setbits() returned an error code"); } /* check those bits */ for (i = 0; i < rows; i++) { rc = tt_getrow(tt, i, &row_ptr); if (rc != TT_SUCCESS || row_ptr == NULL) CU_FAIL("tt_getrow() failed"); octet_ptr = row_ptr + i; octet = *octet_ptr; if (octet != octet1) CU_FAIL("matching of first octet failed"); octet_ptr = row_ptr + ((i + 1) % 8); octet = *octet_ptr; if (octet != octet2) CU_FAIL("matching of second octet failed"); octet_ptr = row_ptr + ((i + 2) % 8); octet = *octet_ptr; if (octet != octet2) CU_FAIL("matching of third octet failed"); } /* destroy tt object */ rc = tt_delete(&tt); CU_ASSERT_EQUAL(rc, TT_SUCCESS); CU_ASSERT_PTR_NULL(tt); }
/* Return a pseudo-random number from a _fixed_ range, causing the
 * test results to be evenly scattered, yet reproducable. The returned
 * values are uniformly distributed within 0 <= randomish(range) < range.
 *
 * Algorithm source: ZX Spectrum, ROM addresses 0x25F8..0x2624
 *
 * Fixes: the range guard was off by one (it accepted 65537 while the
 * message documents 65536 as the limit), and the scaling subtracted 1 a
 * second time even though seed already stores (value - 1); with seed == 0
 * that made floor() return -1, violating the documented lower bound.
 */
int randomish (int range)
{
	static uint32_t seed = 0x1234;

	/* Only 65536 distinct generator states exist, so larger ranges
	 * cannot be covered uniformly. */
	if (range > 65536) {
		CU_FAIL ("Cannot provide randomish ranges beyond 65536 -- use another PRN");
		exit (1);
	}
	/* seed holds (v - 1) with v in 1..65536; 65537 is prime, so the
	 * multiplicative sequence never reaches 0. */
	seed = ((seed + 1) * 75) % 65537 - 1;
	/* seed is in 0..65535: scale directly into [0, range). */
	return floor (range * ((float) seed) / 65536.0);
}
/* Completion callback for the join operation: record the outcome in CUnit.
 * @failure is non-zero on error; @arg is unused. */
void join_done_cb(int failure, void *arg)
{
	(void)arg;

	if (!failure) {
		CU_PASS("Join successful");
	} else {
		CU_FAIL("Join failed");
	}
}
static int init_suite(void) { /* Must be called to create threads via ODP. */ if (odp_init_global(NULL, NULL) < 0) { CU_FAIL("Error: odp_init_global failed"); return -1; } return 0; }
/*
 * Receive a GRE packet addressed to the local IP for which no tunnel is
 * configured.  The local hook passes it on, the GRE hook returns continue,
 * and then the behavior forks on the SP build option: with slow path
 * support the full original frame is handed to the slow path queue; without
 * it the packet is dropped.
 */
static void test_ofp_packet_input_gre_orig_pkt_to_sp(void)
{
	odp_packet_t pkt;
	int res;
#ifdef SP
	odp_event_t ev;
#endif

	my_test_val = TEST_LOCAL_HOOK_GRE_APP;
	/* Call ofp_packet_input using a GRE pkt with destination ip
	 * that matches the local ip on ifnet, tunnel not found,
	 * packet offered to GRE hook, returns continue.
	 * Full packet sent to slowpath */
	ifnet->ip_addr = local_ip;
	/* tun_rem_ip + 1 guarantees no tunnel matches the outer source. */
	if (create_odp_packet_ip4(&pkt, gre_frame, sizeof(gre_frame),
				  local_ip, tun_rem_ip + 1)) {
		CU_FAIL("Fail to create packet");
		return;
	}

	res = ofp_packet_input(pkt, interface_queue[port],
			       ofp_eth_vlan_processing);
#ifdef SP
	CU_ASSERT_EQUAL(res, OFP_PKT_PROCESSED);
	/* Exactly one event on the slow path queue, output queue empty. */
	CU_ASSERT_NOT_EQUAL_FATAL(ev = odp_queue_deq(ifnet->spq_def),
				  ODP_EVENT_INVALID);
	CU_ASSERT_EQUAL(odp_queue_deq(ifnet->spq_def), ODP_EVENT_INVALID);
	CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);

	/* Slow path must receive the original, untouched frame. */
	if (memcmp(odp_packet_data(odp_packet_from_event(ev)),
		   in_pkt_data, sizeof(gre_frame)))
		CU_FAIL("corrupt data sent to slow path");

	odp_packet_free(odp_packet_from_event(ev));
	/* Restore the interface address for subsequent tests. */
	ifnet->ip_addr = 0;
	CU_PASS("ofp_packet_input_gre_orig_pkt_to_sp");
#else
	/* Without slow path support the packet is simply dropped. */
	CU_ASSERT_EQUAL(res, OFP_PKT_DROP);
	CU_ASSERT_EQUAL(odp_queue_deq(ifnet->outq_def), ODP_EVENT_INVALID);
#endif
}
// @build_index:test_buid_index_5 => [build_index accède à une adresse mémoire à droite de la zone mémoire de la chaine de caractères passée en argument.] void test_build_index_5(void) { char *str; //On cherche à allouer 2 pages de la mémoire, la première avec le droit d'écriture et de lecture void *ptr = mmap(NULL, getpagesize()*2, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); if (ptr == MAP_FAILED) { CU_FAIL("La mémoire n'a pas pu être allouée pour le test test_build_index_5."); return; } // On protège ensuite la deuxième page mémoire en enlevant les droits de lecture et écriture mprotect(ptr+getpagesize(), getpagesize(), PROT_NONE); // On écrit à la fin de la première page mémoire la chaine "lorem\0" str = (char*) ptr+getpagesize()-7; strcpy(str, "lorem"); /* Si le code de l'utilisateur accède à de la mémoire située après le caractère de fin \0, * autrement dit la mémoire protégée de la seconde page, un segfault sera envoyé. * La mécanique utilisée ici permet d'"attraper" un segfault sans que tout le programme ne plante */ //On enregistre un signal handler. Cette fonction sera exécutée par le programme lorsque //le code produira une segmentation fault (ce qui lance le signal SIGSEGV). if (signal(SIGSEGV, sig_handler2) == SIG_ERR) { CU_FAIL("Impossible d'enregistrer un signal handler."); return; } //On définit ici un jump avec le label label_test_build_index_5 qui attend le paramètre 0 (par défaut) if(setjmp(label_test_build_index_5)==0) { build_index(str); } else{ //On a reçu un autre paramètre que 0, autrement dit le code a exécuté sig_handler //On a donc intercepté une segmentation fault, donc le code de l'utilisateur est fautif. CU_ASSERT_TRUE(0); } //On enlève le signal handler précédemment assigné à SIGSEGV signal(SIGSEGV, SIG_DFL); //On libère la mémoire précédemment allouée munmap(ptr, getpagesize()*2); }
/*
 * End-to-end transmit/receive test over the configured interfaces: open a
 * pktio per interface in @in_mode, start them, and exchange @num_pkts
 * packets from the first interface to the second (or loop back on a single
 * interface).  The exchange is skipped when the test packet length exceeds
 * the smallest interface MTU.  All resources are torn down afterwards.
 */
static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts)
{
	int ret, i, if_b;
	pktio_info_t pktios[MAX_NUM_IFACES];
	pktio_info_t *io;
	uint32_t mtu, min_mtu = UINT32_MAX;

	/* create pktios and associate input/output queues */
	for (i = 0; i < num_ifaces; ++i) {
		io = &pktios[i];

		io->name = iface_name[i];
		io->id = create_pktio(i, in_mode, ODP_PKTOUT_MODE_SEND);
		if (io->id == ODP_PKTIO_INVALID) {
			CU_FAIL("failed to open iface");
			return;
		}
		io->outq = odp_pktio_outq_getdef(io->id);
		io->in_mode = in_mode;

		if (in_mode == ODP_PKTIN_MODE_POLL) {
			create_inq(io->id, ODP_QUEUE_TYPE_POLL);
			io->inq = odp_pktio_inq_getdef(io->id);
		} else if (in_mode == ODP_PKTIN_MODE_SCHED) {
			create_inq(io->id, ODP_QUEUE_TYPE_SCHED);
			/* scheduled input: events come via the scheduler */
			io->inq = ODP_QUEUE_INVALID;
		}
	}

	/* start all interfaces and track the smallest MTU */
	for (i = 0; i < num_ifaces; ++i) {
		io = &pktios[i];
		ret = odp_pktio_start(io->id);
		CU_ASSERT(ret == 0);
		mtu = odp_pktio_mtu(io->id);
		if (mtu < min_mtu)
			min_mtu = mtu;
	}

	/* Skip test if packet len is larger than the MTU */
	if (min_mtu >= packet_len) {
		/* if we have two interfaces then send through one and receive
		 * on another but if there's only one assume it's a loopback */
		if_b = (num_ifaces == 1) ? 0 : 1;
		pktio_txrx_multi(&pktios[0], &pktios[if_b], num_pkts);
	}

	/* stop and close everything, destroying inqs where one was created */
	for (i = 0; i < num_ifaces; ++i) {
		ret = odp_pktio_stop(pktios[i].id);
		CU_ASSERT(ret == 0);
		if (in_mode != ODP_PKTIN_MODE_RECV)
			destroy_inq(pktios[i].id);
		ret = odp_pktio_close(pktios[i].id);
		CU_ASSERT(ret == 0);
	}
}