int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
                        uint32_t index, void *page,
                        size_t *sizep, bool raw, int get_and_free)
{
        struct tmem_pool *pool;
        int ret = -1;
        bool eph;

        if (!raw) {
                BUG_ON(irqs_disabled());
                BUG_ON(in_softirq());
        }
        pool = zcache_get_pool_by_id(cli_id, pool_id);
        eph = is_ephemeral(pool);
        if (likely(pool != NULL)) {
                if (atomic_read(&pool->obj_count) > 0)
                        ret = tmem_get(pool, oidp, index, (char *)(page),
                                        sizep, raw, get_and_free);
                zcache_put_pool(pool);
        }
        WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
                        "zcache_get fails on persistent pool, "
                        "bad things are very likely to happen soon\n");
#ifdef RAMSTER_TESTING
        if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
                pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
#endif
        return ret;
}
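/*
 * Illustrative sketch (assumption, not from the original file): a minimal
 * cleancache-style "get" hook calling zcache_get_page().  Deriving the oid
 * from the filekey, using the LOCAL_CLIENT id, and passing the target
 * struct page through the void *page argument (decompressed in place when
 * raw == false) follow the usual zcache convention but are assumptions here;
 * get_and_free == 0 asks tmem_get to leave the compressed copy in the pool.
 */
static int example_cleancache_get_page(int pool_id,
                                       struct cleancache_filekey key,
                                       pgoff_t index, struct page *page)
{
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        size_t size = PAGE_SIZE;

        return zcache_get_page(LOCAL_CLIENT, pool_id, &oid, (uint32_t)index,
                               page, &size, false, 0);
}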
/*
 * free the pampd and remove it from any zcache lists
 * pampd must no longer be pointed to from any tmem data structures!
 */
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
                              struct tmem_oid *oid, uint32_t index, bool acct)
{
        struct page *page = NULL;
        unsigned int zsize, zpages;
        bool zero_filled = false;

        BUG_ON(preemptible());

        if (pampd == (void *)ZERO_FILLED) {
                zero_filled = true;
                zsize = 0;
                zpages = 1;
                dec_zcache_zero_filled_pages();
        }

        if (pampd_is_remote(pampd) && !zero_filled) {
                BUG_ON(!ramster_enabled);
                pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
                if (pampd == NULL)
                        return;
        }
        if (is_ephemeral(pool)) {
                if (!zero_filled)
                        page = zbud_free_and_delist((struct zbudref *)pampd,
                                                    true, &zsize, &zpages);
                if (page)
                        dec_zcache_eph_pageframes();
                dec_zcache_eph_zpages(zpages);
                dec_zcache_eph_zbytes(zsize);
                /* FIXME CONFIG_RAMSTER... check acct parameter? */
        } else {
                if (!zero_filled)
                        page = zbud_free_and_delist((struct zbudref *)pampd,
                                                    false, &zsize, &zpages);
                if (page)
                        dec_zcache_pers_pageframes();
                dec_zcache_pers_zpages(zpages);
                dec_zcache_pers_zbytes(zsize);
        }
        if (!is_local_client(pool->client) && !zero_filled)
                ramster_count_foreign_pages(is_ephemeral(pool), -1);
        if (page && !zero_filled)
                zcache_free_page(page);
}
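/*
 * Illustrative sketch (assumption, not from the original file): the kind of
 * all-zero page check that backs the ZERO_FILLED sentinel handled above.
 * A page whose contents are entirely zero can be represented by the sentinel
 * pampd instead of a zbud allocation, so freeing it only needs to adjust the
 * zero-filled-page counter.
 */
static bool example_page_is_zero_filled(void *ptr)
{
        unsigned long *data = ptr;
        unsigned int pos;

        for (pos = 0; pos < PAGE_SIZE / sizeof(*data); pos++) {
                if (data[pos])
                        return false;
        }
        return true;
}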
/*
 * icmp_packet_in_handler
 *
 * API for handling incoming packets
 */
indigo_core_listener_result_t
icmpa_packet_in_handler (of_packet_in_t *packet_in)
{
    of_octets_t                   octets;
    of_port_no_t                  port_no;
    of_match_t                    match;
    ppe_packet_t                  ppep;
    indigo_core_listener_result_t result = INDIGO_CORE_LISTENER_RESULT_PASS;
    uint32_t                      type, code;

    debug_counter_inc(&pkt_counters.icmp_total_in_packets);
    if (!packet_in) return INDIGO_CORE_LISTENER_RESULT_PASS;

    of_packet_in_data_get(packet_in, &octets);

    /*
     * Identify the recv port
     */
    if (packet_in->version <= OF_VERSION_1_1) {
        return INDIGO_CORE_LISTENER_RESULT_PASS;
    } else {
        if (of_packet_in_match_get(packet_in, &match) < 0) {
            AIM_LOG_ERROR("ICMPA: match get failed");
            debug_counter_inc(&pkt_counters.icmp_internal_errors);
            return INDIGO_CORE_LISTENER_RESULT_PASS;
        }
        port_no = match.fields.in_port;
    }

    if (port_no == OF_PORT_DEST_CONTROLLER) {
        debug_counter_inc(&pkt_counters.icmp_total_passed_packets);
        return INDIGO_CORE_LISTENER_RESULT_PASS;
    }

    if (port_no > MAX_PORTS) {
        AIM_LOG_ERROR("ICMPA: Port No: %d Out of Range %d",
                      port_no, MAX_PORTS);
        debug_counter_inc(&pkt_counters.icmp_internal_errors);
        return INDIGO_CORE_LISTENER_RESULT_PASS;
    }

    /*
     * Check the packet-in reasons in metadata
     *
     * The ICMP agent should not consume packets coming in due to an L2
     * source miss or a station move.
     */
    if ((match.fields.metadata & OFP_BSN_PKTIN_FLAG_STATION_MOVE) ||
        (match.fields.metadata & OFP_BSN_PKTIN_FLAG_NEW_HOST)) {
        debug_counter_inc(&pkt_counters.icmp_total_passed_packets);
        return INDIGO_CORE_LISTENER_RESULT_PASS;
    }

    ppe_packet_init(&ppep, octets.data, octets.bytes);
    if (ppe_parse(&ppep) < 0) {
        AIM_LOG_RL_ERROR(&icmp_pktin_log_limiter, os_time_monotonic(),
                         "ICMPA: Packet_in parsing failed.");
        debug_counter_inc(&pkt_counters.icmp_internal_errors);
        return INDIGO_CORE_LISTENER_RESULT_PASS;
    }

    /*
     * Identify if this is an Echo Request destined to one of the VRouters
     */
    if (ppe_header_get(&ppep, PPE_HEADER_ICMP)) {
        if (icmpa_reply(&ppep, port_no, &result)) {
            ++port_pkt_counters[port_no].icmp_echo_packets;
            return result;
        }
    }

    /*
     * To handle traceroute, we need to check for
     * a) UDP Packet
     * b) dest IP is a VRouter IP
     * c) UDP src and dest ports are ephemeral
     */
    if (ppe_header_get(&ppep, PPE_HEADER_UDP) &&
        ppe_header_get(&ppep, PPE_HEADER_IP4)) {
        uint32_t dest_ip, src_port, dest_port;
        ppe_field_get(&ppep, PPE_FIELD_IP4_DST_ADDR, &dest_ip);
        ppe_field_get(&ppep, PPE_FIELD_UDP_SRC_PORT, &src_port);
        ppe_field_get(&ppep, PPE_FIELD_UDP_DST_PORT, &dest_port);

        if (router_ip_check(dest_ip) && is_ephemeral(src_port) &&
            is_ephemeral(dest_port)) {
            AIM_LOG_TRACE("ICMP Port Unreachable received on port: %d",
                          port_no);
            type = ICMP_DEST_UNREACHABLE;
            code = 3;
            result = INDIGO_CORE_LISTENER_RESULT_DROP;
            if (icmpa_send(&ppep, port_no, type, code)) {
                ++port_pkt_counters[port_no].icmp_port_unreachable_packets;
                return result;
            }
        }
    }

    /*
     * Identify if the reason is valid for the ICMP agent to consume the packet
     */
    if (match.fields.metadata & OFP_BSN_PKTIN_FLAG_L3_MISS) {
        AIM_LOG_TRACE("ICMP Dest Network Unreachable received on port: %d",
                      port_no);
        type = ICMP_DEST_UNREACHABLE;
        code = 0;
        result = INDIGO_CORE_LISTENER_RESULT_DROP;
        if (icmpa_send(&ppep, port_no, type, code)) {
            ++port_pkt_counters[port_no].icmp_net_unreachable_packets;
        }
    } else if (match.fields.metadata & OFP_BSN_PKTIN_FLAG_TTL_EXPIRED) {
        AIM_LOG_TRACE("ICMP TTL Expired received on port: %d", port_no);
        type = ICMP_TIME_EXCEEDED;
        code = 0;
        result = INDIGO_CORE_LISTENER_RESULT_DROP;
        if (icmpa_send(&ppep, port_no, type, code)) {
            ++port_pkt_counters[port_no].icmp_time_exceeded_packets;
        }
    }

    return result;
}
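/*
 * Illustrative sketch (assumption, not part of the handler above): the agent
 * would typically register icmpa_packet_in_handler with the Indigo core so it
 * is invoked for every packet-in; the wrapper name and error handling shown
 * here are hypothetical.
 */
indigo_error_t
example_icmpa_register (void)
{
    indigo_error_t rv;

    rv = indigo_core_packet_in_listener_register(icmpa_packet_in_handler);
    if (rv < 0) {
        AIM_LOG_ERROR("ICMPA: Failed to register packet-in listener");
        return rv;
    }

    return INDIGO_ERROR_NONE;
}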