/*
 * Reserve the memory regions the flattened device tree (FDT) asks for,
 * plus the FDT blob itself, so the allocator never hands them out.
 */
void __init arm_dt_memblock_reserve(void) { u64 *reserve_map, base, size; if (!initial_boot_params) /* Nothing to do when no device-tree blob was passed to the kernel. */ return; /* Reserve the dtb region */ memblock_reserve(virt_to_phys(initial_boot_params), be32_to_cpu(initial_boot_params->totalsize)); /*
 * be32_to_cpu: converts the big-endian FDT header field to the CPU's
 * native endianness.  This marks the device-tree blob itself as a
 * reserved memblock region.
 */ /*
 * Process the reserve map. This will probably overlap the initrd
 * and dtb locations which are already reserved, but overlaping
 * doesn't hurt anything
 */ reserve_map = ((void*)initial_boot_params) + be32_to_cpu(initial_boot_params->off_mem_rsvmap); /*
 * off_mem_rsvmap: byte offset of the memory reserve map inside the
 * blob.  Every region listed there is added to the memblock reserved
 * list below.  Entries are big-endian (base, size) pairs; a zero size
 * terminates the map.
 */ while (1) { base = be64_to_cpup(reserve_map++); size = be64_to_cpup(reserve_map++); if (!size) break; memblock_reserve(base, size); } }
/*
 * Mark the device-tree blob and every region listed in its memory
 * reserve map as reserved in memblock.
 */
void __init arm_dt_memblock_reserve(void)
{
	u64 *entry;
	u64 start, len;

	/* No FDT handed over by the boot loader: nothing to protect. */
	if (!initial_boot_params)
		return;

	/* Keep the blob itself out of the free pool. */
	memblock_reserve(virt_to_phys(initial_boot_params),
			 be32_to_cpu(initial_boot_params->totalsize));

	/*
	 * Walk the reserve map.  It may overlap the already reserved
	 * initrd/dtb areas, but overlapping reservations are harmless.
	 * Each entry is a big-endian (base, size) pair; size == 0 ends
	 * the list.
	 */
	entry = (void *)initial_boot_params +
		be32_to_cpu(initial_boot_params->off_mem_rsvmap);
	for (;;) {
		start = be64_to_cpup(entry++);
		len = be64_to_cpup(entry++);
		if (len == 0)
			break;
		memblock_reserve(start, len);
	}
}
/*
 * Module init: scan the FDT memory reserve map for pairwise overlapping
 * entries.  On overlap, log an error and arm a 60-second timer
 * (mem_rsvmap_exp_handle fires later); init always reports success.
 *
 * Fix: the error message misspelled "reserved" as "resvered".
 */
static int __init mem_rsvmap_init_module(void)
{
	int i = 0, j = 0;
	u64 *reserve_map;
	u64 base, size;

	/* Prepare (but do not yet arm) the delayed-report timer. */
	init_timer(&delay_timer);
	delay_timer.function = mem_rsvmap_exp_handle;
	delay_timer.expires = jiffies + HZ * 60;

	/* Reserve map lives at off_mem_rsvmap inside the FDT blob. */
	reserve_map = ((void *)initial_boot_params) +
		      be32_to_cpu(initial_boot_params->off_mem_rsvmap);

	/* Compare every entry against all entries after it. */
	while (1) {
		base = be64_to_cpup(reserve_map + i);
		size = be64_to_cpup(reserve_map + i + 1);
		if (!size)		/* zero size terminates the map */
			goto success;
		i += 2;
		j = i;
		while (1) {
			u64 base_next, size_next;

			base_next = be64_to_cpup(reserve_map + j);
			size_next = be64_to_cpup(reserve_map + j + 1);
			if (!size_next)
				break;
			j += 2;
			/* Disjoint if one region ends before the other starts. */
			if (base_next >= (base + size))
				continue;
			if (base >= (base_next + size_next))
				continue;
			goto error;
		}
	}
error:
	printk(KERN_ERR " Error: reserved memory overlapping!\n");
	add_timer(&delay_timer);
	/* fall through: overlap is reported, but init still succeeds */
success:
	return 0;
}
/*
 * Probe a p5ioc2 IO hub device-tree node: carve up the hub's TCE memory
 * between its child PHBs and initialize each PHB.
 */
void __init pnv_pci_init_p5ioc2_hub(struct device_node *np) { struct device_node *phbn; const __be64 *prop64; u64 hub_id; void *tce_mem; uint64_t tce_per_phb; int64_t rc; int phb_count = 0; pr_info("Probing p5ioc2 IO-Hub %s\n", np->full_name); /* The OPAL hub id is mandatory; bail out if firmware didn't supply it. */ prop64 = of_get_property(np, "ibm,opal-hubid", NULL); if (!prop64) { pr_err(" Missing \"ibm,opal-hubid\" property !\n"); return; } hub_id = be64_to_cpup(prop64); pr_info(" HUB-ID : 0x%016llx\n", hub_id); /* Count child PHBs and calculate TCE space per PHB */ for_each_child_of_node(np, phbn) { if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") || of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) phb_count++; } if (phb_count <= 0) { pr_info(" No PHBs for Hub %s\n", np->full_name); return; } /* Even split, rounded down to a power of two per PHB. */ tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count); pr_info(" Allocating %lld MB of TCE memory per PHB\n", tce_per_phb >> 20); /* Currently allocate 16M of TCE memory for every Hub
 *
 * XXX TODO: Make it chip local if possible
 */ tce_mem = memblock_virt_alloc(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY); pr_debug(" TCE : 0x%016lx..0x%016lx\n", __pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1); /* Hand the TCE memory to firmware; nothing to unwind on failure
 * (memblock allocations at boot are not freed). */ rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem), P5IOC2_TCE_MEMORY); if (rc != OPAL_SUCCESS) { pr_err(" Failed to allocate TCE memory, OPAL error %lld\n", rc); return; } /* Initialize PHBs, giving each its tce_per_phb slice of tce_mem. */ for_each_child_of_node(np, phbn) { if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") || of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) { pnv_pci_init_p5ioc2_phb(phbn, hub_id, tce_mem, tce_per_phb); tce_mem += tce_per_phb; } } }
/*
 * Read one NBD reply header from the socket and decode it into *reply.
 * Returns 0 on success, -1 with errno = EINVAL on short read or bad magic.
 * Note: reply->error / reply->handle are filled in even when the magic
 * turns out to be invalid.
 */
int nbd_receive_reply(int csock, struct nbd_reply *reply) { uint8_t buf[4 + 4 + 8]; uint32_t magic; /* Poison the buffer so a partial read is obvious when debugging. */ memset(buf, 0xAA, sizeof(buf)); if (read_sync(csock, buf, sizeof(buf)) != sizeof(buf)) { LOG("read failed"); errno = EINVAL; return -1; } /* Reply
   [ 0 ..  3]    magic   (NBD_REPLY_MAGIC)
   [ 4 ..  7]    error   (0 == no error)
   [ 8 .. 15]    handle
 */ magic = be32_to_cpup((uint32_t*)buf); reply->error = be32_to_cpup((uint32_t*)(buf + 4)); reply->handle = be64_to_cpup((uint64_t*)(buf + 8)); TRACE("Got reply: " "{ magic = 0x%x, .error = %d, handle = %" PRIu64" }", magic, reply->error, reply->handle); if (magic != NBD_REPLY_MAGIC) { LOG("invalid magic (got 0x%x)", magic); errno = EINVAL; return -1; } return 0; }
/*
 * Perform the client side of the old-style NBD negotiation.
 * Wire layout: bytes 0-7 "NBDMAGIC", 8-15 the old-style handshake magic,
 * 16-23 export size, then 128 reserved bytes.
 * Returns 0 on success; -1 with errno = EINVAL on short read or bad magic.
 */
int nbd_receive_negotiate(int csock, off_t *size, size_t *blocksize) { char buf[8 + 8 + 8 + 128]; uint64_t magic; TRACE("Receiving negotation."); if (read_sync(csock, buf, sizeof(buf)) != sizeof(buf)) { LOG("read failed"); errno = EINVAL; return -1; } magic = be64_to_cpup((uint64_t*)(buf + 8)); *size = be64_to_cpup((uint64_t*)(buf + 16)); /* The protocol does not carry a block size; report a fixed default. */ *blocksize = 1024; TRACE("Magic is %c%c%c%c%c%c%c%c", qemu_isprint(buf[0]) ? buf[0] : '.', qemu_isprint(buf[1]) ? buf[1] : '.', qemu_isprint(buf[2]) ? buf[2] : '.', qemu_isprint(buf[3]) ? buf[3] : '.', qemu_isprint(buf[4]) ? buf[4] : '.', qemu_isprint(buf[5]) ? buf[5] : '.', qemu_isprint(buf[6]) ? buf[6] : '.', qemu_isprint(buf[7]) ? buf[7] : '.'); TRACE("Magic is 0x%" PRIx64, magic); TRACE("Size is %" PRIu64, *size); if (memcmp(buf, "NBDMAGIC", 8) != 0) { LOG("Invalid magic received"); errno = EINVAL; return -1; } TRACE("Checking magic"); /* Old-style handshake magic ("cliserv" magic). */ if (magic != 0x00420281861253LL) { LOG("Bad magic received"); errno = EINVAL; return -1; } return 0; }
static u64 value_read(int offset, int size, void *structure) { switch (size) { case 1: return *(u8 *) (structure + offset); case 2: return be16_to_cpup((__be16 *) (structure + offset)); case 4: return be32_to_cpup((__be32 *) (structure + offset)); case 8: return be64_to_cpup((__be64 *) (structure + offset)); default: printk(KERN_WARNING "Field size %d bits not handled\n", size * 8); return 0; } }
static ssize_t nbd_receive_request(int csock, struct nbd_request *request) { uint8_t buf[4 + 4 + 8 + 8 + 4]; uint32_t magic; ssize_t ret; ret = read_sync(csock, buf, sizeof(buf)); if (ret < 0) { return ret; } if (ret != sizeof(buf)) { LOG("read failed"); return -EINVAL; } /* Request [ 0 .. 3] magic (NBD_REQUEST_MAGIC) [ 4 .. 7] type (0 == READ, 1 == WRITE) [ 8 .. 15] handle [16 .. 23] from [24 .. 27] len */ magic = be32_to_cpup((uint32_t*)buf); request->type = be32_to_cpup((uint32_t*)(buf + 4)); request->handle = be64_to_cpup((uint64_t*)(buf + 8)); request->from = be64_to_cpup((uint64_t*)(buf + 16)); request->len = be32_to_cpup((uint32_t*)(buf + 24)); TRACE("Got request: " "{ magic = 0x%x, .type = %d, from = %" PRIu64" , len = %u }", magic, request->type, request->from, request->len); if (magic != NBD_REQUEST_MAGIC) { LOG("invalid magic (got 0x%x)", magic); return -EINVAL; } return 0; }
/*
 * Copy the firmware (vTPM) event log, located via the "ibm,vtpm" device
 * tree node, into a freshly allocated buffer in *log.
 * Returns 0 on success, -EFAULT if already initialized, -ENODEV if the
 * node is absent, -EIO on missing/empty properties, -ENOMEM on OOM.
 */
int read_log(struct tpm_bios_log *log) { struct device_node *np; const u32 *sizep; const __be64 *basep; /* Refuse to clobber an already populated event log. */ if (log->bios_event_log != NULL) { pr_err("%s: ERROR - Eventlog already initialized\n", __func__); return -EFAULT; } np = of_find_node_by_name(NULL, "ibm,vtpm"); if (!np) { pr_err("%s: ERROR - IBMVTPM not supported\n", __func__); return -ENODEV; } /* NOTE(review): *sizep is used raw while basep goes through
 * be64_to_cpup(); DT property values are big-endian, so on a
 * little-endian kernel the size would also need be32_to_cpup() —
 * confirm which targets this is built for. */ sizep = of_get_property(np, "linux,sml-size", NULL); if (sizep == NULL) { pr_err("%s: ERROR - SML size not found\n", __func__); goto cleanup_eio; } if (*sizep == 0) { pr_err("%s: ERROR - event log area empty\n", __func__); goto cleanup_eio; } basep = of_get_property(np, "linux,sml-base", NULL); if (basep == NULL) { pr_err("%s: ERROR - SML not found\n", __func__); goto cleanup_eio; } of_node_put(np); log->bios_event_log = kmalloc(*sizep, GFP_KERNEL); if (!log->bios_event_log) { pr_err("%s: ERROR - Not enough memory for BIOS measurements\n", __func__); return -ENOMEM; } log->bios_event_log_end = log->bios_event_log + *sizep; /* The base is a big-endian physical address; map it and copy. */ memcpy(log->bios_event_log, __va(be64_to_cpup(basep)), *sizep); return 0; cleanup_eio: of_node_put(np); return -EIO; }
/**
 * of_property_read_u64_index - Find and read a u64 from a multi-value property.
 *
 * @np: device node from which the property value is to be read.
 * @propname: name of the property to be searched.
 * @index: index of the u64 in the list of values
 * @out_value: pointer to return value, modified only if no error.
 *
 * Look up @propname on @np and decode the @index-th big-endian 64-bit
 * value it contains.  Returns 0 on success, -EINVAL if the property is
 * absent, -ENODATA if it has no value, and -EOVERFLOW if it is too small
 * to hold @index + 1 values.
 *
 * *@out_value is written only when a valid u64 could be decoded.
 */
int of_property_read_u64_index(const struct device_node *np,
			       const char *propname,
			       u32 index, u64 *out_value)
{
	const u64 *val;

	/* Require room for index + 1 values of sizeof(u64) each. */
	val = of_find_property_value_of_size(np, propname,
					     sizeof(*out_value) * (index + 1),
					     0, NULL);
	if (IS_ERR(val))
		return PTR_ERR(val);

	*out_value = be64_to_cpup(((__be64 *)val) + index);
	return 0;
}
/*
 * Determine the memory hotplug block size.  Prefer the "ibm,lmb-size"
 * property of the dynamic-reconfiguration node; otherwise (pseries only)
 * infer it from the size of the memory node that follows memory@0.
 * Falls back to MIN_MEMORY_BLOCK_SIZE when nothing better is found.
 */
static unsigned long get_memblock_size(void) { struct device_node *np; unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; struct resource r; np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (np) { const __be64 *size; /* LMB size is stored big-endian in the device tree. */ size = of_get_property(np, "ibm,lmb-size", NULL); if (size) memblock_size = be64_to_cpup(size); of_node_put(np); } else if (machine_is(pseries)) { /* This fallback really only applies to pseries */ unsigned int memzero_size = 0; np = of_find_node_by_path("/memory@0"); if (np) { if (!of_address_to_resource(np, 0, &r)) memzero_size = resource_size(&r); of_node_put(np); } if (memzero_size) { /* We now know the size of memory@0, use this to find
 * the first memoryblock and get its size.
 */ char buf[64]; sprintf(buf, "/memory@%x", memzero_size); np = of_find_node_by_path(buf); if (np) { if (!of_address_to_resource(np, 0, &r)) memblock_size = resource_size(&r); of_node_put(np); } } } return memblock_size; }
static unsigned long get_memblock_size(void) { struct device_node *np; unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; struct resource r; np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (np) { const __be64 *size; size = of_get_property(np, "ibm,lmb-size", NULL); if (size) memblock_size = be64_to_cpup(size); of_node_put(np); } else if (machine_is(pseries)) { unsigned int memzero_size = 0; np = of_find_node_by_path("/memory@0"); if (np) { if (!of_address_to_resource(np, 0, &r)) memzero_size = resource_size(&r); of_node_put(np); } if (memzero_size) { char buf[64]; sprintf(buf, "/memory@%x", memzero_size); np = of_find_node_by_path(buf); if (np) { if (!of_address_to_resource(np, 0, &r)) memblock_size = resource_size(&r); of_node_put(np); } } } return memblock_size; }
/*
 * Read one NBD reply header and decode it into *reply, translating the
 * on-wire NBD error code into a local errno value.
 * Returns 0 on success, the negative read_sync() result on I/O error,
 * or -EINVAL on short read or bad magic.  reply fields are filled in
 * even when the magic turns out to be invalid.
 */
ssize_t nbd_receive_reply(int csock, struct nbd_reply *reply) { uint8_t buf[NBD_REPLY_SIZE]; uint32_t magic; ssize_t ret; ret = read_sync(csock, buf, sizeof(buf)); if (ret < 0) { return ret; } if (ret != sizeof(buf)) { LOG("read failed"); return -EINVAL; } /* Reply
   [ 0 ..  3]    magic   (NBD_REPLY_MAGIC)
   [ 4 ..  7]    error   (0 == no error)
   [ 8 .. 15]    handle
 */ magic = be32_to_cpup((uint32_t*)buf); reply->error = be32_to_cpup((uint32_t*)(buf + 4)); reply->handle = be64_to_cpup((uint64_t*)(buf + 8)); /* Map the protocol-level error number to this system's errno. */ reply->error = nbd_errno_to_system_errno(reply->error); TRACE("Got reply: " "{ magic = 0x%x, .error = %d, handle = %" PRIu64" }", magic, reply->error, reply->handle); if (magic != NBD_REPLY_MAGIC) { LOG("invalid magic (got 0x%x)", magic); return -EINVAL; } return 0; }
/*
 * regmap bus .gather_write for MMIO: @reg is a single big-endian 32-bit
 * register offset, @val is val_size bytes of big-endian data written as
 * consecutive val_bytes-wide MMIO stores starting at that offset.
 * Always returns 0.
 */
static int regmap_mmio_gather_write(void *context, const void *reg, size_t reg_size, const void *val, size_t val_size) { struct regmap_mmio_context *ctx = context; u32 offset; /* Register addresses are always exactly 4 bytes on this bus. */ BUG_ON(reg_size != 4); offset = be32_to_cpup(reg); /* Emit one store per value, byte-swapping from the regmap's
 * big-endian representation to the CPU before writing. */ while (val_size) { switch (ctx->val_bytes) { case 1: writeb(*(u8 *)val, ctx->regs + offset); break; case 2: writew(be16_to_cpup(val), ctx->regs + offset); break; case 4: writel(be32_to_cpup(val), ctx->regs + offset); break; #ifdef CONFIG_64BIT case 8: writeq(be64_to_cpup(val), ctx->regs + offset); break; #endif default: /* Should be caught by regmap_mmio_check_config */ BUG(); } val_size -= ctx->val_bytes; val += ctx->val_bytes; offset += ctx->val_bytes; } return 0; }
/*
 * Initialize one p5ioc2 PHB: hand its TCE slice to OPAL, allocate and
 * populate the pnv_phb / pci_controller structures, map its registers,
 * and set up ranges, MSIs and the IOMMU table.
 */
static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id, void *tce_mem, u64 tce_size) { struct pnv_phb *phb; const __be64 *prop64; u64 phb_id; int64_t rc; static int primary = 1; /* first PHB initialized becomes the primary bridge */ pr_info(" Initializing p5ioc2 PHB %s\n", np->full_name); /* The OPAL PHB id is mandatory. */ prop64 = of_get_property(np, "ibm,opal-phbid", NULL); if (!prop64) { pr_err(" Missing \"ibm,opal-phbid\" property !\n"); return; } phb_id = be64_to_cpup(prop64); pr_devel(" PHB-ID : 0x%016llx\n", phb_id); pr_devel(" TCE AT : 0x%016lx\n", __pa(tce_mem)); pr_devel(" TCE SZ : 0x%016llx\n", tce_size); /* Tell firmware where this PHB's TCE memory lives. */ rc = opal_pci_set_phb_tce_memory(phb_id, __pa(tce_mem), tce_size); if (rc != OPAL_SUCCESS) { pr_err(" Failed to set TCE memory, OPAL error %lld\n", rc); return; } phb = alloc_bootmem(sizeof(struct pnv_phb)); if (phb) { memset(phb, 0, sizeof(struct pnv_phb)); phb->hose = pcibios_alloc_controller(np); } if (!phb || !phb->hose) { pr_err(" Failed to allocate PCI controller\n"); return; } spin_lock_init(&phb->lock); phb->hose->first_busno = 0; phb->hose->last_busno = 0xff; phb->hose->private_data = phb; phb->hub_id = hub_id; phb->opal_id = phb_id; phb->type = PNV_PHB_P5IOC2; phb->model = PNV_PHB_MODEL_P5IOC2; /* Map registers; a failure is logged but not fatal here. */ phb->regs = of_iomap(np, 0); if (phb->regs == NULL) pr_err(" Failed to map registers !\n"); else { /* Dump a few setup registers for debugging. */ pr_devel(" P_BUID = 0x%08x\n", in_be32(phb->regs + 0x100)); pr_devel(" P_IOSZ = 0x%08x\n", in_be32(phb->regs + 0x1b0)); pr_devel(" P_IO_ST = 0x%08x\n", in_be32(phb->regs + 0x1e0)); pr_devel(" P_MEM1_H = 0x%08x\n", in_be32(phb->regs + 0x1a0)); pr_devel(" P_MEM1_L = 0x%08x\n", in_be32(phb->regs + 0x190)); pr_devel(" P_MSZ1_L = 0x%08x\n", in_be32(phb->regs + 0x1c0)); pr_devel(" P_MEM_ST = 0x%08x\n", in_be32(phb->regs + 0x1d0)); pr_devel(" P_MEM2_H = 0x%08x\n", in_be32(phb->regs + 0x2c0)); pr_devel(" P_MEM2_L = 0x%08x\n", in_be32(phb->regs + 0x2b0)); pr_devel(" P_MSZ2_H = 0x%08x\n", in_be32(phb->regs + 0x2d0)); pr_devel(" P_MSZ2_L = 0x%08x\n", in_be32(phb->regs + 0x2e0)); } /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(phb->hose, np, primary); primary = 0; phb->hose->ops = &pnv_pci_ops; /* Setup MSI support */ pnv_pci_init_p5ioc2_msis(phb); /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_p5ioc2_dma_dev_setup; pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table, tce_mem, tce_size, 0); }
addr = (__be32 *) buf + desc[i].offset_words; val = (be32_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else if (desc[i].size_bits <= 64) { int shift; u64 val; u64 mask; __be64 *addr; shift = 64 - desc[i].offset_bits - desc[i].size_bits; mask = ((1ull << desc[i].size_bits) - 1) << shift; addr = (__be64 *) buf + desc[i].offset_words; val = (be64_to_cpup(addr) & mask) >> shift; value_write(desc[i].struct_offset_bytes, desc[i].struct_size_bytes, val, structure); } else { if (desc[i].offset_bits % 8 || desc[i].size_bits % 8) { printk(KERN_WARNING "Structure field %s of size %d " "bits is not byte-aligned\n", desc[i].field_name, desc[i].size_bits); } memcpy(structure + desc[i].struct_offset_bytes, buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8,
/* * Create the skb for the upper layer */ void xbee_rx(struct net_device *dev, unsigned char *data, int len, unsigned char* addr) { struct xbee_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct ieee802154_mac_cb* cb; struct ieee802154_hdr hdr; int hlen; int packet_stat; memset(&hdr.fc, 0, sizeof(hdr.fc)); skb = dev_alloc_skb(len+sizeof(struct ieee802154_hdr)); if (!skb) { if (printk_ratelimit()) printk(KERN_NOTICE "[NET] xbee rx: low on mem - packet dropped\n"); priv->stats.rx_dropped++; return; } skb_reserve(skb, len); skb->dev = dev; //skb->pkt_type = PACKET_HOST; skb->protocol = htons(ETH_P_IEEE802154); cb = mac_cb(skb); hdr.source.mode = IEEE802154_ADDR_LONG; hdr.source.extended_addr = be64_to_cpup((__be64*)addr); hdr.source.pan_id = xbee_get_pan_id(dev); //cb->source.mode = IEEE802154_ADDR_LONG; //cb->source.pan_id = xbee_get_pan_id(dev); //cb->source.extended_addr = be64_to_cpup((__be64*)addr); hdr.dest.mode = IEEE802154_ADDR_LONG; hdr.dest.extended_addr = IEEE802154_ADDR_BROADCAST; hdr.dest.pan_id = xbee_get_pan_id(dev); hlen = ieee802154_hdr_push(skb, &hdr); if (hlen < 0) return ; skb->mac_len = hlen; skb_reset_mac_header(skb); if (len > ieee802154_max_payload(&hdr)) return ; // Put all the data into the socket buffer memcpy(skb_push(skb, len), data, len); skb->ip_summed = CHECKSUM_UNNECESSARY; // don't check it (does this make any difference?) skb->len = len; packet_stat = netif_rx(skb); if(packet_stat == NET_RX_SUCCESS) { printk(KERN_ALERT "[NET] Packet received successfully\n"); } else if(packet_stat == NET_RX_DROP) { printk(KERN_ALERT "[NET] Packet was dropped!\n"); } printk(KERN_ALERT "MAC SRC addr %llx\n", hdr.source.extended_addr); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; }
/*
 * Initialize one p5ioc2 PHB: hand its TCE slice to OPAL, allocate the
 * pnv_phb / pci_controller structures, map registers, and set up
 * ranges, MSIs and the statically embedded IOMMU table/table group.
 */
static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id, void *tce_mem, u64 tce_size) { struct pnv_phb *phb; const __be64 *prop64; u64 phb_id; int64_t rc; static int primary = 1; /* first PHB initialized becomes the primary bridge */ struct iommu_table_group *table_group; struct iommu_table *tbl; pr_info(" Initializing p5ioc2 PHB %s\n", np->full_name); /* The OPAL PHB id is mandatory. */ prop64 = of_get_property(np, "ibm,opal-phbid", NULL); if (!prop64) { pr_err(" Missing \"ibm,opal-phbid\" property !\n"); return; } phb_id = be64_to_cpup(prop64); pr_devel(" PHB-ID : 0x%016llx\n", phb_id); pr_devel(" TCE AT : 0x%016lx\n", __pa(tce_mem)); pr_devel(" TCE SZ : 0x%016llx\n", tce_size); /* Tell firmware where this PHB's TCE memory lives. */ rc = opal_pci_set_phb_tce_memory(phb_id, __pa(tce_mem), tce_size); if (rc != OPAL_SUCCESS) { pr_err(" Failed to set TCE memory, OPAL error %lld\n", rc); return; } phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0); phb->hose = pcibios_alloc_controller(np); if (!phb->hose) { pr_err(" Failed to allocate PCI controller\n"); return; } spin_lock_init(&phb->lock); phb->hose->first_busno = 0; phb->hose->last_busno = 0xff; phb->hose->private_data = phb; phb->hose->controller_ops = pnv_pci_p5ioc2_controller_ops; phb->hub_id = hub_id; phb->opal_id = phb_id; phb->type = PNV_PHB_P5IOC2; phb->model = PNV_PHB_MODEL_P5IOC2; /* Map registers; a failure is logged but not fatal here. */ phb->regs = of_iomap(np, 0); if (phb->regs == NULL) pr_err(" Failed to map registers !\n"); else { /* Dump a few setup registers for debugging. */ pr_devel(" P_BUID = 0x%08x\n", in_be32(phb->regs + 0x100)); pr_devel(" P_IOSZ = 0x%08x\n", in_be32(phb->regs + 0x1b0)); pr_devel(" P_IO_ST = 0x%08x\n", in_be32(phb->regs + 0x1e0)); pr_devel(" P_MEM1_H = 0x%08x\n", in_be32(phb->regs + 0x1a0)); pr_devel(" P_MEM1_L = 0x%08x\n", in_be32(phb->regs + 0x190)); pr_devel(" P_MSZ1_L = 0x%08x\n", in_be32(phb->regs + 0x1c0)); pr_devel(" P_MEM_ST = 0x%08x\n", in_be32(phb->regs + 0x1d0)); pr_devel(" P_MEM2_H = 0x%08x\n", in_be32(phb->regs + 0x2c0)); pr_devel(" P_MEM2_L = 0x%08x\n", in_be32(phb->regs + 0x2b0)); pr_devel(" P_MSZ2_H = 0x%08x\n", in_be32(phb->regs + 0x2d0)); pr_devel(" P_MSZ2_L = 0x%08x\n", in_be32(phb->regs + 0x2e0)); } /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(phb->hose, np, primary); primary = 0; phb->hose->ops = &pnv_pci_ops; /* Setup MSI support */ pnv_pci_init_p5ioc2_msis(phb); /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_p5ioc2_dma_dev_setup; pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table, tce_mem, tce_size, 0, IOMMU_PAGE_SHIFT_4K); /*
 * We do not allocate iommu_table as we do not support
 * hotplug or SRIOV on P5IOC2 and therefore iommu_free_table()
 * should not be called for phb->p5ioc2.table_group.tables[0] ever.
 */ tbl = phb->p5ioc2.table_group.tables[0] = &phb->p5ioc2.iommu_table; table_group = &phb->p5ioc2.table_group; /* Derive the 32-bit DMA window start/size from the table geometry. */ table_group->tce32_start = tbl->it_offset << tbl->it_page_shift; table_group->tce32_size = tbl->it_size << tbl->it_page_shift; }
/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	/* Point at the last 8 bytes of the 4K buffer-list page.
	 * NOTE(review): assumes buffer_list_addr is a void * so the
	 * arithmetic is in bytes (GNU extension); if it were a typed
	 * pointer the offset would be scaled — confirm the field type. */
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	/* The counter is stored big-endian; convert for the host. */
	adapter->rx_no_buffer = be64_to_cpup(p);
}