int memcmp(const void *m1, const void *m2, __kernel_size_t n)
{
    const u8 *s1 = (u8 *)m1;
    const u8 *s2 = (u8 *)m2;
    const u64 *a1, *a2;

    /* If the size is too small we punt to the byte-compare loop at the
     * bottom; hopefully this will not turn up in inner loops. Unaligned
     * pointers take the unaligned-load path instead.
     */
    CVMX_PREFETCH0(m1);
    CVMX_PREFETCH0(m2);

    if (!TOO_SMALL(n) && !UNALIGNED(s1, s2)) {
        /* Both pointers are word-aligned: load and compare the blocks
         * of memory one word at a time.
         */
        a1 = (u64 *)s1;
        a2 = (u64 *)s2;
        while (n >= LBLOCKSIZE) {
            if (*a1 != *a2)
                break;
            a1++;
            a2++;
            n -= LBLOCKSIZE;
        }
        /* Fall through to check the n mod LBLOCKSIZE remaining bytes
         * (or the mismatching word) one byte at a time.
         */
        s1 = (u8 *)a1;
        s2 = (u8 *)a2;
    } else if (!TOO_SMALL(n) && UNALIGNED(s1, s2)) {
        /* Unaligned pointers: use the hardware's unaligned 64-bit
         * loads to still compare one word at a time.
         */
        u64 t1, t2;

        while (n >= LBLOCKSIZE) {
            CVMX_LOADUNA_INT64(t1, s1, 0);
            CVMX_LOADUNA_INT64(t2, s2, 0);
            if (t1 != t2)
                break;
            s1 += LBLOCKSIZE;
            s2 += LBLOCKSIZE;
            n -= LBLOCKSIZE;
        }
    }

    /* Check the n mod LBLOCKSIZE remaining bytes one at a time. */
    while (n--) {
        if (*s1 != *s2)
            return *s1 - *s2;
        s1++;
        s2++;
    }
    return 0;
}
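/*
 * The word-compare paths above depend on LBLOCKSIZE, TOO_SMALL() and
 * UNALIGNED(), which are defined elsewhere. A minimal sketch of plausible
 * definitions, modeled on the newlib-style memcmp this routine resembles
 * (the names match the code above, but these exact bodies are an
 * assumption, not the project's actual macros):
 */
#if 0 /* illustrative sketch only */
#define LBLOCKSIZE      (sizeof(u64))        /* bytes compared per iteration */
#define TOO_SMALL(len)  ((len) < LBLOCKSIZE) /* not even one full word */
/* Non-zero when either pointer is not 8-byte aligned. */
#define UNALIGNED(x, y) \
    ((((unsigned long)(x)) & (LBLOCKSIZE - 1)) | \
     (((unsigned long)(y)) & (LBLOCKSIZE - 1)))
#endif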
/**
 * Process incoming packets.
 */
int inic_data_loop(void)
{
    cvm_common_wqe_t *swp = NULL;
    cvm_tcp_in_endpoints_t conn;
    cvm_tcp_tcphdr_t *th = NULL;
    cvm_ip_ip_t *ih = NULL;
    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
    uint64_t cpu_clock_hz = sys_info_ptr->cpu_clock_hz;
    uint64_t tick_cycle = cvmx_get_cycle();
    uint64_t tick_step;
    uint32_t idle_processing_interval_ticks =
        (CVM_COMMON_IDLE_PROCESSING_INTERVAL) * (1000 * 1000) /
        (CVM_COMMON_TICK_LEN_US);
    uint32_t idle_processing_last_ticks = 0;
#ifdef INET6
    struct cvm_ip6_ip6_hdr *ip6 = NULL;
#ifdef CVM_ENET_TUNNEL
    struct cvm_ip6_ip6_hdr *i6h = NULL;
#endif
#endif
#ifdef CVM_CLI_APP
    uint64_t idle_cycle_start_value;
#endif

    /* The simulator reports a CPU clock of zero; assume 333 MHz there. */
    if (cpu_clock_hz == 0)
    {
        cpu_clock_hz = 333000000;
    }

    /* Convert the tick length from microseconds to CPU cycles. */
    tick_step = (CVM_COMMON_TICK_LEN_US * cpu_clock_hz) / 1000000;
    cvm_debug_print_interval = cpu_clock_hz;

#ifndef REAL_HW
    /* for the simulator, set the debug interval to be 3M cycles */
    cvm_debug_print_interval = 3000000;
#endif

#ifdef DUTY_CYCLE
    start_cycle = cvmx_get_cycle();
    process_count = 0;
#endif

    if (cvmx_coremask_first_core(coremask_data))
    {
        /* Initiate a timer transaction for ARP entry timeouts */
        //if (cvm_enet_arp_timeout_init() != CVMX_TIM_STATUS_SUCCESS)
        //{
        //    printf("Failed init of cvm_ip_arp_timeout_init\n");
        //}
    }

#if defined(CVM_COMBINED_APP_STACK)
    /* Flush the packets sent by main_global and main_local */
    /*
    printf("before cvm_send_packet () \n ");
    if (out_swp)
    {
        cvm_send_packet ();
    }
    printf("after cvm_send_packet () \n ");
    */
    uint64_t app_timeout = cvmx_get_cycle();
#endif

    /* start the main loop */
    while (1)
    {
#ifdef DUTY_CYCLE
        end_cycle = cvmx_get_cycle();

        /* check the wrap-around case */
        if (end_cycle < start_cycle)
        {
            end_cycle += cpu_clock_hz;
        }

        if ((end_cycle - start_cycle) > cvm_debug_print_interval)
        {
            inic_do_per_second_duty_cycle_processing();
        }
#endif /* DUTY_CYCLE */

        /* Start the next work-queue-entry fetch; the response is collected
         * below, so the POW lookup overlaps with the tick bookkeeping. */
        cvmx_pow_work_request_async_nocheck(CVMX_SCR_WORK, 1);

        /* update the ticks variable */
        while (cvmx_get_cycle() - tick_cycle > tick_step)
        {
            tick_cycle += tick_step;
            cvm_tcp_ticks++;
            if (!(cvm_tcp_ticks & 0x1f))
            {
                CVM_COMMON_HISTORY_SET_CYCLE();
            }
        }

        /* do common idle processing */
        if ((cvm_tcp_ticks - idle_processing_last_ticks) > idle_processing_interval_ticks)
        {
            if (cvmx_coremask_first_core(coremask_data))
            {
                cvm_common_do_idle_processing();
            }
            idle_processing_last_ticks = cvm_tcp_ticks;
        }

#ifdef CVM_CLI_APP
        idle_cycle_start_value = cvmx_get_cycle();
#endif

        /* get work entry */
        swp = (cvm_common_wqe_t *)cvmx_pow_work_response_async(CVMX_SCR_WORK);
        if (swp == NULL)
        {
            idle_counter++;
            if (core_id == highest_core_id)
            {
                cvm_enet_check_link_status();
            }
#ifdef CVM_CLI_APP
            cvmx_fau_atomic_add64(core_idle_cycles[core_id],
                                  (cvmx_get_cycle() - idle_cycle_start_value));
#endif
            continue;
        }

        CVM_COMMON_EXTRA_STATS_ADD64(CVM_FAU_REG_WQE_RCVD, 1);

#ifdef WORK_QUEUE_ENTRY_SIZE_128 // {
        CVMX_PREFETCH0(swp);
#else
        /* Prefetch work-queue entry */
        CVMX_PREFETCH0(swp);
        CVMX_PREFETCH128(swp);
#endif // WORK_QUEUE_ENTRY_SIZE_128 }

        out_swp = 0;
        out_swp_tail = 0;

#ifdef DUTY_CYCLE
        /* we are about to start processing the packet -
         * remember the cycle count */
        process_start_cycle = cvmx_get_cycle();
#endif

        /* Short-cut the common case */
        if (cvmx_likely(swp->hw_wqe.unused == 0))
        {
            goto packet_from_the_wire;
        }

        printf("Get work with unused is %X\n", swp->hw_wqe.unused);

        {
            {
packet_from_the_wire:
#if CVM_PKO_DONTFREE
                swp->hw_wqe.packet_ptr.s.i = 0;
#endif

#ifdef SANITY_CHECKS
                /* we have a work queue entry - do input sanity checks */
                ret = cvm_common_input_sanity_and_buffer_count_update(swp);
#endif
                if (cvmx_unlikely(swp->hw_wqe.word2.s.rcv_error))
                {
                    goto discard_swp; /* Receive error */
                }

#ifndef WORK_QUEUE_ENTRY_SIZE_128 // {
                {
                    /* Make sure pre-fetch completed */
                    uint64_t dp = *(volatile uint64_t *)&swp->next;
                }
#endif // WORK_QUEUE_ENTRY_SIZE_128 }

                {
                    /* Initialize SW portion of the work-queue entry */
                    uint64_t *dptr = (uint64_t *)(&swp->next);
                    dptr[0] = 0;
                    dptr[1] = 0;
                    dptr[2] = 0;
                    dptr[3] = 0;
                }

                if (cvmx_unlikely(swp->hw_wqe.word2.s.not_IP))
                {
                    goto output;
                }

                /* Shortcut classification to avoid multiple lookups */
                if (
#ifndef INET6
                    swp->hw_wqe.word2.s.is_v6 ||
#endif
                    swp->hw_wqe.word2.s.is_bcast
#ifndef INET6
                    || swp->hw_wqe.word2.s.is_mcast
#endif
                   )
                {
                    goto discard_swp; /* Receive error */
                }

                /* Packet is unicast IPv4, without L2 errors */
                /* (All IP exceptions are dropped. This currently includes
                 * IPv4 options and IPv6 extension headers.) */
                if (cvmx_unlikely(swp->hw_wqe.word2.s.IP_exc))
                {
                    goto discard_swp;
                }

                /* Packet is IPv4 (and no IP exceptions) */
                if (cvmx_unlikely(swp->hw_wqe.word2.s.is_frag ||
                                  !swp->hw_wqe.word2.s.tcp_or_udp))
                {
                    goto output;
                }

#ifdef ANVL_RFC_793_COMPLIANCE
                /* RFC 793 says that:
                 *  - We should send a RST out when we get a packet with FIN
                 *    set without the ACK bit set in the flags field.
                 *  - We should send a RST out when we get a packet with no
                 *    flag set.
                 * Hence, let the TCP stack handle these conditions. */
                if (cvmx_unlikely(swp->hw_wqe.word2.s.L4_error &&
                                  ((cvmx_pip_l4_err_t)swp->hw_wqe.word2.s.err_code != CVMX_PIP_TCP_FLG8_ERR) &&
                                  ((cvmx_pip_l4_err_t)swp->hw_wqe.word2.s.err_code != CVMX_PIP_TCP_FLG9_ERR)))
#else
                if (cvmx_unlikely(swp->hw_wqe.word2.s.L4_error))
#endif
                {
                    cvm_tcp_handle_error(swp);
                    goto discard_swp;
                }

                /* Packet is not fragmented, TCP/UDP, no IP exceptions/L4 errors */
                /* We can try an L4 lookup now, but we need all the information */
                ih = ((cvm_ip_ip_t *)&(swp->hw_wqe.packet_data[CVM_COMMON_PD_ALIGN]));

                if (!swp->hw_wqe.word2.s.is_v6)
                {
                    /* For IPv4, the L4 header starts after the variable-length
                     * IP header plus the CVM_COMMON_PD_ALIGN pad at which the
                     * packet data begins in the mbuf. */
                    swp->l4_offset = ((uint16_t)(ih->ip_hl) << 2) + CVM_COMMON_PD_ALIGN;
                    swp->l4_prot = ih->ip_p;
                }
#ifdef INET6
                else
                {
                    ip6 = (struct cvm_ip6_ip6_hdr *)
                              &swp->hw_wqe.packet_data[CVM_COMMON_IP6_PD_ALIGN];

                    CVM_COMMON_DBG_MSG(CVM_COMMON_DBG_LVL_5,
                        "%s: %d Packet trace Src: %s/%d Dest: %s/%d prot: %d len: %d\n",
                        __FUNCTION__, __LINE__,
                        cvm_ip6_ip6_sprintf(&ip6->ip6_dst), conn.ie_fport,
                        cvm_ip6_ip6_sprintf(&ip6->ip6_src), conn.ie_lport,
                        swp->l4_prot, swp->hw_wqe.len);

                    /* For IPv6, the base header length is fixed. */
                    swp->l4_offset = CVM_IP6_IP6_HDRLEN;
                    swp->l4_prot = ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt;
                }
#endif

                th = ((cvm_tcp_tcphdr_t *)&(swp->hw_wqe.packet_data[swp->l4_offset]));

                /* check if it is a TCP packet */
                if (swp->l4_prot == CVM_IP_IPPROTO_TCP)
                {
                    process_handle(swp);
#ifdef INET6
                    if (!swp->hw_wqe.word2.s.is_v6)
#endif
                    {
                        CVM_TCP_TCP_DUMP((void *)ih);

                        /* IPv4: record the connection endpoints */
                        conn.ie_laddr = ih->ip_dst.s_addr;
                        conn.ie_faddr = ih->ip_src.s_addr;
                        conn.ie_lport = th->th_dport;
                        conn.ie_fport = th->th_sport;
                    }
#ifdef INET6
                    else
                    {
                        /* IPv6: record the connection endpoints */
                        memcpy(&conn.ie6_laddr, &ip6->ip6_dst,
                               sizeof(struct cvm_ip6_in6_addr));
                        memcpy(&conn.ie6_faddr, &ip6->ip6_src,
                               sizeof(struct cvm_ip6_in6_addr));
                        conn.ie_lport = th->th_dport;
                        conn.ie_fport = th->th_sport;

                        /* do a TCPv6 lookup */
                        swp->tcb = cvm_tcp6_lookup(swp);

                        CVM_COMMON_DBG_MSG(CVM_COMMON_DBG_LVL_5,
                            "%s: %d TCPv6 lookup Src: %s/%d Dest: %s/%d ret_tcb: 0x%llx\n",
                            __FUNCTION__, __LINE__,
                            cvm_ip6_ip6_sprintf((cvm_ip6_in6_addr_t *)
                                                &conn.ie6_faddr), conn.ie_fport,
                            cvm_ip6_ip6_sprintf((cvm_ip6_in6_addr_t *)
                                                &conn.ie6_laddr), conn.ie_lport,
                            CAST64(swp->tcb));
                    }
#endif // INET6
                }

                goto output;
            } /* packet from wire */
        } /* switch */

output:
        CVMX_SYNCWS;

        /* Send packet out */
        if (out_swp)
        {
            cvm_send_packet();
        }

        if (swp != NULL)
        {
            S3_send_packet((cvmx_wqe_t *)swp);
            swp = NULL;
        }

#ifdef DUTY_CYCLE
        process_end_cycle = cvmx_get_cycle();
        process_count += (process_end_cycle - process_start_cycle);
#endif
    }

    return 0;

discard_swp:
    /* Free the chained buffers */
    cvm_common_packet_free(swp);

    /* Free the work queue entry */
    cvm_common_free_fpa_buffer(swp, CVMX_FPA_WQE_POOL,
                               CVMX_FPA_WQE_POOL_SIZE / CVMX_CACHE_LINE_SIZE);
    swp = NULL;

    goto output;
} /* inic_data_loop */
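/*
 * Note on the work-fetch pattern used in inic_data_loop() above: the POW
 * lookup is split into an asynchronous request at the top of each loop
 * iteration and a response read further down, so the lookup latency is
 * hidden behind the tick and idle bookkeeping. A minimal sketch of the
 * same pattern in isolation (handle_wqe() is a hypothetical placeholder,
 * not a function from this file):
 */
#if 0 /* illustrative sketch only */
static void pow_fetch_loop(void)
{
    while (1)
    {
        /* Start the next-work lookup; the second argument of 1 asks the
         * POW to wait for work rather than fail immediately. */
        cvmx_pow_work_request_async_nocheck(CVMX_SCR_WORK, 1);

        /* ...per-iteration bookkeeping overlaps with the lookup here... */

        /* Synchronize and collect the response from the scratchpad. */
        cvmx_wqe_t *wqe = cvmx_pow_work_response_async(CVMX_SCR_WORK);
        if (wqe == NULL)
        {
            continue; /* no work available this time around */
        }

        handle_wqe(wqe); /* hypothetical per-packet processing */
    }
}
#endif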