static int lua_to_host_set(struct lua_State *L, enum lua_place from, const char *name, struct host_set *h)
{
	int pop;

	if ((pop = lua_getfrom(L, from, name)) < 0)
		return -1;

	if (!lua_istable(L, -1))
		return -1;

	uint32_t port = 0, port_mask = 0;

	if (lua_to_ip(L, TABLE, "ip", &h->ip) ||
	    lua_to_int(L, TABLE, "port", &port))
		return -1;

	if (lua_to_int(L, TABLE, "ip_mask", &h->ip_mask))
		h->ip_mask = 0;
	if (lua_to_int(L, TABLE, "port_mask", &port_mask))
		h->port_mask = 0;

	h->port = rte_bswap16(port);
	h->port_mask = rte_bswap16(port_mask);
	h->ip = rte_bswap32(h->ip);
	h->ip_mask = rte_bswap32(h->ip_mask);

	lua_pop(L, pop);
	return 0;
}
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
void dump_acl4_rule(struct rte_mbuf *m, uint32_t sig)
{
	uint32_t offset = sig & ~ACL_DENY_SIGNATURE;
	unsigned char a, b, c, d;
	struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(
			m, unsigned char *) + sizeof(struct ether_hdr));

	uint32_t_to_char(rte_bswap32(ipv4_hdr->src_addr), &a, &b, &c, &d);
	acl_log("Packet Src:%hhu.%hhu.%hhu.%hhu ", a, b, c, d);
	uint32_t_to_char(rte_bswap32(ipv4_hdr->dst_addr), &a, &b, &c, &d);
	acl_log("Dst:%hhu.%hhu.%hhu.%hhu ", a, b, c, d);

	acl_log("Src port:%hu,Dst port:%hu ",
		rte_bswap16(*(uint16_t *)(ipv4_hdr + 1)),
		rte_bswap16(*((uint16_t *)(ipv4_hdr + 1) + 1)));
	acl_log("hit ACL %d - ", offset);

	print_one_ipv4_rule(acl_config.rule_ipv4 + offset, 1);

	acl_log("\n\n");
}
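/*
 * For reference: dump_acl4_rule() relies on a small helper that splits a
 * host-order IPv4 address into its dotted-quad bytes after the rte_bswap32()
 * conversion from network order.  A minimal sketch of such a helper is shown
 * below; the name uint32_t_to_char matches the calls above, but this body is
 * an assumption, not necessarily the exact upstream definition.
 */
static inline void
uint32_t_to_char(uint32_t ip, unsigned char *a, unsigned char *b,
	unsigned char *c, unsigned char *d)
{
	*a = (unsigned char)(ip >> 24 & 0xff);	/* most significant byte */
	*b = (unsigned char)(ip >> 16 & 0xff);
	*c = (unsigned char)(ip >> 8 & 0xff);
	*d = (unsigned char)(ip & 0xff);	/* least significant byte */
}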
static int test_byteorder(void)
{
	uint16_t res_u16;
	uint32_t res_u32;
	uint64_t res_u64;

	res_u16 = rte_bswap16(u16);
	printf("%"PRIx16" -> %"PRIx16"\n", u16, res_u16);
	if (res_u16 != 0x3713)
		return -1;

	res_u32 = rte_bswap32(u32);
	printf("%"PRIx32" -> %"PRIx32"\n", u32, res_u32);
	if (res_u32 != 0xefbeaddeUL)
		return -1;

	res_u64 = rte_bswap64(u64);
	printf("%"PRIx64" -> %"PRIx64"\n", u64, res_u64);
	if (res_u64 != 0xcefabebafecaaddeULL)
		return -1;

	res_u16 = rte_bswap16(0x1337);
	printf("const %"PRIx16" -> %"PRIx16"\n", 0x1337, res_u16);
	if (res_u16 != 0x3713)
		return -1;

	res_u32 = rte_bswap32(0xdeadbeefUL);
	printf("const %"PRIx32" -> %"PRIx32"\n", (uint32_t) 0xdeadbeef, res_u32);
	if (res_u32 != 0xefbeaddeUL)
		return -1;

	res_u64 = rte_bswap64(0xdeadcafebabefaceULL);
	printf("const %"PRIx64" -> %"PRIx64"\n",
	       (uint64_t) 0xdeadcafebabefaceULL, res_u64);
	if (res_u64 != 0xcefabebafecaaddeULL)
		return -1;

	return 0;
}
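/*
 * The checks above exercise the unconditional rte_bswap*() macros, which swap
 * bytes regardless of host endianness.  Protocol code usually prefers the
 * endianness-aware conversions from rte_byteorder.h, which compile to a no-op
 * on big-endian CPUs.  A minimal usage sketch follows; the helper name and
 * the values are illustrative only.
 */
#include <rte_byteorder.h>

static inline void byteorder_usage_sketch(void)
{
	uint16_t host_port = 8080;
	uint32_t host_ip = 0xc0a80001;	/* 192.168.0.1 in host order */

	/* host order -> network (big-endian) order */
	uint16_t be_port = rte_cpu_to_be_16(host_port);
	uint32_t be_ip = rte_cpu_to_be_32(host_ip);

	/* network order -> host order */
	uint16_t back_port = rte_be_to_cpu_16(be_port);
	uint32_t back_ip = rte_be_to_cpu_32(be_ip);

	(void)back_port;
	(void)back_ip;
}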
static int rte_table_lpm_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
	uint64_t pkts_out_mask = 0;
	uint32_t i;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_LPM_STATS_PKTS_IN_ADD(lpm, n_pkts_in);

	pkts_out_mask = 0;
	for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
		__builtin_clzll(pkts_mask)); i++) {
		uint64_t pkt_mask = 1LLU << i;

		if (pkt_mask & pkts_mask) {
			struct rte_mbuf *pkt = pkts[i];
			uint32_t ip = rte_bswap32(
				RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
			int status;
			uint8_t nht_pos;

			status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
			if (status == 0) {
				pkts_out_mask |= pkt_mask;
				entries[i] = (void *) &lpm->nht[nht_pos *
					lpm->entry_size];
			}
		}
	}

	*lookup_hit_mask = pkts_out_mask;
	RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in -
		__builtin_popcountll(pkts_out_mask));
	return 0;
}
static void fill_table(struct task_args *targ, struct rte_table_hash *table)
{
	struct cpe_table_data *cpe_table_data;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
	int ret = lua_to_cpe_table_data(prox_lua(), GLOBAL,
					targ->cpe_table_name, socket_id,
					&cpe_table_data);
	const uint8_t n_slaves = targ->nb_slave_threads;
	const uint8_t worker_id = targ->worker_thread_id;

	for (uint32_t i = 0; i < cpe_table_data->n_entries; ++i) {
		if (rte_bswap32(cpe_table_data->entries[i].ip) % n_slaves != worker_id) {
			continue;
		}

		struct cpe_table_entry *entry = &cpe_table_data->entries[i];
		uint32_t port_idx = prox_cfg.cpe_table_ports[entry->port_idx];
		PROX_PANIC(targ->mapping[port_idx] == 255,
			   "Error reading cpe table: Mapping for port %d is missing", port_idx);

		struct cpe_key key = {
			.ip = entry->ip,
			.gre_id = entry->gre_id,
		};

		struct cpe_data data = {
			.qinq_svlan = entry->svlan,
			.qinq_cvlan = entry->cvlan,
			.user = entry->user,
			.mac_port = {
				.mac = entry->eth_addr,
				.out_idx = targ->mapping[port_idx],
			},
			.tsc = UINT64_MAX,
		};

		int key_found;
		void *entry_in_hash;

		rte_table_hash_key8_ext_dosig_ops.f_add(table, &key, &data,
							&key_found, &entry_in_hash);
	}
}
int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
			uint8_t *cipherkey, uint32_t cipherkeylen,
			uint8_t *authkey, uint32_t authkeylen,
			uint32_t add_auth_data_length,
			uint32_t digestsize)
{
	struct qat_alg_cd *content_desc = &cdesc->cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &content_desc->cipher;
	struct icp_qat_hw_auth_algo_blk *hash = &content_desc->hash;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	enum icp_qat_hw_cipher_convert key_convert;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t state1_size = 0;
	uint16_t state2_size = 0;

	PMD_INIT_FUNC_TRACE();

	/* CD setup */
	if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	} else {
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_CMP_AUTH_RES);
	}

	cipher->aes.cipher_config.val = ICP_QAT_HW_CIPHER_CONFIG_BUILD(
			cdesc->qat_mode, cdesc->qat_cipher_alg, key_convert,
			cdesc->qat_dir);
	memcpy(cipher->aes.key, cipherkey, cipherkeylen);

	hash->sha.inner_setup.auth_config.reserved = 0;
	hash->sha.inner_setup.auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
				cdesc->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));

	/* Do precomputes */
	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
			ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ), &state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
	} else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			cipherkey, cipherkeylen, (uint8_t *)(hash->sha.state1 +
			ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		*(uint32_t *)&(hash->sha.state1[
				ICP_QAT_HW_GALOIS_128_STATE1_SZ +
				ICP_QAT_HW_GALOIS_H_SZ]) =
			rte_bswap32(add_auth_data_length);
		proto = ICP_QAT_FW_LA_GCM_PROTO;
	} else {
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, (uint8_t *)(hash->sha.state1),
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
	}

	/* Request template setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = cdesc->qat_cmd;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	/* Configure the common header protocol flags */
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;

	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state2_sz =
			RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		hash_cd_ctrl->inner_state2_sz =
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		hash_cd_ctrl->inner_state1_sz =
				ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
						ICP_QAT_HW_GALOIS_LEN_A_SZ +
						ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);
	auth_param->auth_res_sz = digestsize;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else {
		PMD_DRV_LOG(ERR, "invalid param, only authenticated "
				"encryption supported");
		return -EFAULT;
	}
	return 0;
}
int mg_table_lpm_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	//printf("ENTRIES = %p\n", entries);
	struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
	uint64_t pkts_out_mask = 0;
	uint32_t i;

	//struct rte_pktmbuf pkt0 = pkts[0]->pkt;
	//printf("headroom: %d\n", rte_pktmbuf_headroom(pkts[0]));
	////void * data = pkt0.data+128;
	//void * data = pkt0.data;
	//printhex("data = ", data, 256);
	//printhex("data buf addr = ", pkts[0]->buf_addr, 256);
	//printhex("pktinmask = ", &pkts_mask, 8);
	//printhex("ipaddr = ", pkts[0]->buf_addr + lpm->offset, 4);

	pkts_out_mask = 0;
	if (!pkts_mask) {
		// workaround for DPDK bug:
		// __builtin_clzll(x) is undefined for x = 0
		*lookup_hit_mask = pkts_out_mask;
		return 0;
	}
	for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
		__builtin_clzll(pkts_mask)); i++) {
		//printf("loop %d\n", i);
		uint64_t pkt_mask = 1LLU << i;

		if (pkt_mask & pkts_mask) {
			//printf("pktmaskmatch\n");
			struct rte_mbuf *pkt = pkts[i];
			//uint32_t ip = rte_bswap32(
			//	*((uint32_t*)(&RTE_MBUF_METADATA_UINT8(pkt, lpm->offset))));
			uint32_t ip = rte_bswap32(
				*((uint32_t*)(pkt->buf_addr + lpm->offset)));
			//uint32_t ip = ( *((uint32_t*)(pkt->buf_addr + lpm->offset)) );
			//printhex("checking ip: ", &ip, 4);
			int status;
			uint8_t nht_pos;

			status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
			//printf(" status: %d\n", status);
			if (status == 0) {
				//printf("HIT HIT HIT\n");
				pkts_out_mask |= pkt_mask;
				entries[i] = (void *) &lpm->nht[nht_pos *
					lpm->entry_size];
			} else {
				entries[i] = NULL;
			}
			//printf("r: entries[%d\t] = %p\n", i, entries[i]);
			//printf("r: entries pp[%d\t] = %p\n", i, entries+i);
			//printf("r: entries[%d\t] = %p\n", i, *(entries+i));
			//printf(" iface = %d\n", ((uint8_t*)(entries[i]))[4]);
		}
		// FIXME: if input mask does not match should we also set entry ptr to NULL?
	}

	*lookup_hit_mask = pkts_out_mask;
	return 0;
}
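/*
 * For clarity on the loop bound above: with a 64-bit packet mask, the index
 * of the highest packet present is 63 - __builtin_clzll(pkts_mask), so
 * iterating up to RTE_PORT_IN_BURST_SIZE_MAX (64) minus the leading-zero
 * count visits every set bit exactly once.  The zero-mask guard matters
 * because __builtin_clzll(0) is undefined.  A minimal, self-contained sketch
 * of the same bound computation (the helper name is illustrative only):
 */
#include <stdint.h>

static inline uint32_t burst_upper_bound(uint64_t pkts_mask)
{
	if (pkts_mask == 0)
		return 0;	/* avoid undefined __builtin_clzll(0) */
	/* one past the index of the most significant set bit */
	return 64 - (uint32_t)__builtin_clzll(pkts_mask);
}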
void
app_main_loop_worker_pipeline_lpm_ipv6(void) {
	struct rte_pipeline_params pipeline_params = {
		.name = "pipeline",
		.socket_id = rte_socket_id(),
	};

	struct rte_pipeline *p;
	uint32_t port_in_id[APP_MAX_PORTS];
	uint32_t port_out_id[APP_MAX_PORTS];
	uint32_t table_id;
	uint32_t i;

	RTE_LOG(INFO, USER1,
		"Core %u is doing work (pipeline with IPv6 LPM table)\n",
		rte_lcore_id());

	/* Pipeline configuration */
	p = rte_pipeline_create(&pipeline_params);
	if (p == NULL)
		rte_panic("Unable to configure the pipeline\n");

	/* Input port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_reader_params port_ring_params = {
			.ring = app.rings_rx[i],
		};

		struct rte_pipeline_port_in_params port_params = {
			.ops = &rte_port_ring_reader_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.arg_ah = NULL,
			.burst_size = app.burst_size_worker_read,
		};

		if (rte_pipeline_port_in_create(p, &port_params,
			&port_in_id[i]))
			rte_panic("Unable to configure input port for "
				"ring %d\n", i);
	}

	/* Output port configuration */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_port_ring_writer_params port_ring_params = {
			.ring = app.rings_tx[i],
			.tx_burst_sz = app.burst_size_worker_write,
		};

		struct rte_pipeline_port_out_params port_params = {
			.ops = &rte_port_ring_writer_ops,
			.arg_create = (void *) &port_ring_params,
			.f_action = NULL,
			.f_action_bulk = NULL,
			.arg_ah = NULL,
		};

		if (rte_pipeline_port_out_create(p, &port_params,
			&port_out_id[i]))
			rte_panic("Unable to configure output port for "
				"ring %d\n", i);
	}

	/* Table configuration */
	{
		struct rte_table_lpm_ipv6_params table_lpm_ipv6_params = {
			.name = "LPM",
			.n_rules = 1 << 24,
			.number_tbl8s = 1 << 21,
			.entry_unique_size =
				sizeof(struct rte_pipeline_table_entry),
			.offset = APP_METADATA_OFFSET(32),
		};

		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_lpm_ipv6_ops,
			.arg_create = &table_lpm_ipv6_params,
			.f_action_hit = NULL,
			.f_action_miss = NULL,
			.arg_ah = NULL,
			.action_data_size = 0,
		};

		if (rte_pipeline_table_create(p, &table_params, &table_id))
			rte_panic("Unable to configure the IPv6 LPM table\n");
	}

	/* Interconnecting ports and tables */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_connect_to_table(p, port_in_id[i],
			table_id))
			rte_panic("Unable to connect input port %u to "
				"table %u\n", port_in_id[i], table_id);

	/* Add entries to tables */
	for (i = 0; i < app.n_ports; i++) {
		struct rte_pipeline_table_entry entry = {
			.action = RTE_PIPELINE_ACTION_PORT,
			{.port_id = port_out_id[i & (app.n_ports - 1)]},
		};

		struct rte_table_lpm_ipv6_key key;
		struct rte_pipeline_table_entry *entry_ptr;
		uint32_t ip;
		int key_found, status;

		key.depth = 8 + __builtin_popcount(app.n_ports - 1);

		ip = rte_bswap32(i << (24 -
			__builtin_popcount(app.n_ports - 1)));
		memcpy(key.ip, &ip, sizeof(uint32_t));

		printf("Adding rule to IPv6 LPM table (IPv6 destination = "
			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
			"%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u => "
			"port out = %u)\n",
			key.ip[0], key.ip[1], key.ip[2], key.ip[3],
			key.ip[4], key.ip[5], key.ip[6], key.ip[7],
			key.ip[8], key.ip[9], key.ip[10], key.ip[11],
			key.ip[12], key.ip[13], key.ip[14], key.ip[15],
			key.depth, i);

		status = rte_pipeline_table_entry_add(p, table_id, &key,
			&entry, &key_found, &entry_ptr);
		if (status < 0)
			rte_panic("Unable to add entry to table %u (%d)\n",
				table_id, status);
	}

	/* Enable input ports */
	for (i = 0; i < app.n_ports; i++)
		if (rte_pipeline_port_in_enable(p, port_in_id[i]))
			rte_panic("Unable to enable input port %u\n",
				port_in_id[i]);

	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("Pipeline consistency check failed\n");

	/* Run-time */
#if APP_FLUSH == 0
	for ( ; ; )
		rte_pipeline_run(p);
#else
	for (i = 0; ; i++) {
		rte_pipeline_run(p);

		if ((i & APP_FLUSH) == 0)
			rte_pipeline_flush(p);
	}
#endif
}
/**
 * Process a GCM crypto operation, walking the (possibly segmented) source
 * mbuf chain and driving the AES-NI GCM primitives for the session.
 *
 * @param op symmetric crypto operation
 * @param session GCM session
 *
 * @return
 *  0 on success, -1 on failure
 */
static int
process_gcm_crypto_op(struct rte_crypto_sym_op *op,
		struct aesni_gcm_session *session)
{
	uint8_t *src, *dst;
	struct rte_mbuf *m_src = op->m_src;
	uint32_t offset = op->cipher.data.offset;
	uint32_t part_len, total_len, data_len;

	RTE_ASSERT(m_src != NULL);

	while (offset >= m_src->data_len) {
		offset -= m_src->data_len;
		m_src = m_src->next;

		RTE_ASSERT(m_src != NULL);
	}

	data_len = m_src->data_len - offset;

	part_len = (data_len < op->cipher.data.length) ? data_len :
			op->cipher.data.length;

	/* Destination buffer is required when segmented source buffer */
	RTE_ASSERT((part_len == op->cipher.data.length) ||
			((part_len != op->cipher.data.length) &&
					(op->m_dst != NULL)));
	/* Segmented destination buffer is not supported */
	RTE_ASSERT((op->m_dst == NULL) ||
			((op->m_dst != NULL) &&
					rte_pktmbuf_is_contiguous(op->m_dst)));

	dst = op->m_dst ?
			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
					op->cipher.data.offset) :
			rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
					op->cipher.data.offset);

	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

	/* sanity checks */
	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
			op->cipher.iv.length != 0) {
		GCM_LOG_ERR("iv");
		return -1;
	}

	/*
	 * GCM working in 12B IV mode => 16B pre-counter block we need
	 * to set BE LSB to 1, driver expects that 16B is allocated
	 */
	if (op->cipher.iv.length == 12) {
		uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
		*iv_padd = rte_bswap32(1);
	}

	if (op->auth.digest.length != 16 &&
			op->auth.digest.length != 12 &&
			op->auth.digest.length != 8) {
		GCM_LOG_ERR("digest");
		return -1;
	}

	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {

		aesni_gcm_enc[session->key].init(&session->gdata,
				op->cipher.iv.data,
				op->auth.aad.data,
				(uint64_t)op->auth.aad.length);

		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
				(uint64_t)part_len);
		total_len = op->cipher.data.length - part_len;

		while (total_len) {
			dst += part_len;
			m_src = m_src->next;

			RTE_ASSERT(m_src != NULL);

			src = rte_pktmbuf_mtod(m_src, uint8_t *);
			part_len = (m_src->data_len < total_len) ?
					m_src->data_len : total_len;

			aesni_gcm_enc[session->key].update(&session->gdata,
					dst, src,
					(uint64_t)part_len);
			total_len -= part_len;
		}

		aesni_gcm_enc[session->key].finalize(&session->gdata,
				op->auth.digest.data,
				(uint64_t)op->auth.digest.length);
	} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */