/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
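/*
 * A minimal, self-contained sketch of the reg_idx arithmetic used above.  It
 * assumes __ALIGN_MASK has the stock kernel definition
 * (((x) + (mask)) & ~(mask)) and uses 0x7C as a stand-in for the VMDq
 * 4-queues-per-pool mask (a plausible value for IXGBE_82599_VMDQ_4Q_MASK;
 * an assumption, not taken from this file).  Passing ~vmdq_mask as the mask
 * makes __ALIGN_MASK(1, ~vmdq_mask) yield the queues-per-pool count, and
 * __ALIGN_MASK(reg_idx, ~vmdq_mask) rounds a register index up to the start
 * of the next pool.
 */
#include <stdio.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int vmdq_mask = 0x7C;	/* assumed 4-queue-per-pool mask */
	unsigned int offset = 2;	/* example VMDq pool offset */

	/* queues per pool: 4 for this mask */
	unsigned int qpp = __ALIGN_MASK(1u, ~vmdq_mask);

	/* first hardware register index of pool 'offset': 2 * 4 = 8 */
	unsigned int base = offset * qpp;

	/* reg_idx 6 (pool 1, queue 2) rounded up to the next pool start: 8 */
	unsigned int next_pool = __ALIGN_MASK(6u, ~vmdq_mask);

	printf("queues/pool=%u base=%u next_pool=%u\n", qpp, base, next_pool);
	return 0;
}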
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
	struct llist_node *free;
	struct tty_buffer *p;

	/* Round the buffer size out */
	size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

	if (size <= MIN_TTYB_SIZE) {
		free = llist_del_first(&port->buf.free);
		if (free) {
			p = llist_entry(free, struct tty_buffer, free);
			goto found;
		}
	}
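/*
 * A quick round-up sketch for the size adjustment above, assuming the stock
 * kernel macro (((x) + (mask)) & ~(mask)) and a stand-in TTYB_ALIGN_MASK of
 * 0xFF (an assumed value, not taken from this file): the requested size is
 * rounded up to the next multiple of mask + 1.
 */
#include <stdio.h>
#include <stddef.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define TTYB_ALIGN_MASK ((size_t)0xFF)	/* assumed value for illustration */

int main(void)
{
	size_t sizes[] = { 1, 256, 300, 511 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%zu -> %zu\n", sizes[i],
		       __ALIGN_MASK(sizes[i], TTYB_ALIGN_MASK));
	/* prints 1 -> 256, 256 -> 256, 300 -> 512, 511 -> 512 */
	return 0;
}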
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FcoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
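/*
 * Worked example of the pool split above, assuming the stock __ALIGN_MASK
 * macro (((x) + (mask)) & ~(mask)) and stand-in values of 0x78 and 0x7C for
 * IXGBE_82599_VMDQ_8Q_MASK / _4Q_MASK (the mask values are assumptions for
 * illustration).  The 128 hardware queues used in the formula above split
 * into 128 / 8 = 16 pools when tcs > 4, or 128 / 4 = 32 pools otherwise, so
 * vmdq_i pools in use leave (pools - vmdq_i) for FCoE.
 */
#include <stdio.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

static void show(unsigned int vmdq_m, unsigned int vmdq_i)
{
	unsigned int qpp = __ALIGN_MASK(1u, ~vmdq_m);	/* queues per pool */
	unsigned int pools = 128 / qpp;			/* total pools */

	printf("mask=0x%02X qpp=%u pools=%u leftover=%u\n",
	       vmdq_m, qpp, pools, pools - vmdq_i);
}

int main(void)
{
	show(0x78, 8);	/* assumed 8Q mask, 8 pools in use -> 8 left   */
	show(0x7C, 16);	/* assumed 4Q mask, 16 pools in use -> 16 left */
	return 0;
}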
static void mali_meson_poweron(int first_poweron)
{
	unsigned long flags;
	u32 p, p_aligned;
	dma_addr_t p_phy;
	int i;
	unsigned int_mask;

	if (!first_poweron) {
		if ((last_power_mode != -1) &&
		    (last_power_mode != MALI_POWER_MODE_DEEP_SLEEP)) {
			MALI_DEBUG_PRINT(3, ("Maybe your system not deep sleep now.......\n"));
			//printk("Maybe your system not deep sleep now.......\n");
			return;
		}
	}

	MALI_DEBUG_PRINT(2, ("mali_meson_poweron: Mali APB bus accessing\n"));
	if (READ_MALI_REG(MALI_PP_PP_VERSION) != MALI_PP_PP_VERSION_MAGIC) {
		MALI_DEBUG_PRINT(3, ("mali_meson_poweron: Mali APB bus access failed\n"));
		//printk("mali_meson_poweron: Mali APB bus access failed.");
		return;
	}
	MALI_DEBUG_PRINT(2, ("..........accessing done.\n"));

	if (READ_MALI_REG(MALI_MMU_DTE_ADDR) != 0) {
		MALI_DEBUG_PRINT(3, ("mali_meson_poweron: Mali is not really powered off\n"));
		//printk("mali_meson_poweron: Mali is not really powered off.");
		return;
	}

	p = (u32)kcalloc(4096 * 4, 1, GFP_KERNEL);
	if (!p) {
		printk("mali_meson_poweron: NOMEM in meson_poweron\n");
		return;
	}

	p_aligned = __ALIGN_MASK(p, 4096);

	/* DTE */
	*(u32 *)(p_aligned) =
		(virt_to_phys((void *)p_aligned) + OFFSET_MMU_PTE) |
		MMU_FLAG_DTE_PRESENT;

	/* PTE */
	for (i = 0; i < 1024; i++) {
		*(u32 *)(p_aligned + OFFSET_MMU_PTE + i * 4) =
			(virt_to_phys((void *)p_aligned) +
			 OFFSET_MMU_VIRTUAL_ZERO + 4096 * i) |
			MMU_FLAG_PTE_PAGE_PRESENT |
			MMU_FLAG_PTE_RD_PERMISSION;
	}

	/* command & data */
	memcpy((void *)(p_aligned + OFFSET_MMU_VIRTUAL_ZERO),
	       poweron_data, 4096);

	p_phy = dma_map_single(NULL, (void *)p_aligned, 4096 * 3, DMA_TO_DEVICE);

	/* Set up Mali GP MMU */
	WRITE_MALI_REG(MALI_MMU_DTE_ADDR, p_phy);
	WRITE_MALI_REG(MALI_MMU_CMD, 0);

	if ((READ_MALI_REG(MALI_MMU_STATUS) & 1) != 1)
		printk("mali_meson_poweron: MMU enabling failed.\n");

	/* Set up Mali command registers */
	WRITE_MALI_REG(MALI_APB_GP_VSCL_START, 0);
	WRITE_MALI_REG(MALI_APB_GP_VSCL_END, 0x38);

	spin_lock_irqsave(&lock, flags);
	int_mask = READ_MALI_REG(MALI_APB_GP_INT_MASK);
	WRITE_MALI_REG(MALI_APB_GP_INT_CLEAR, 0x707bff);
	WRITE_MALI_REG(MALI_APB_GP_INT_MASK, 0);

	/* Start GP */
	WRITE_MALI_REG(MALI_APB_GP_CMD, 1);

	for (i = 0; i < 100; i++)
		udelay(500);

	/* check Mali GP interrupt */
	if (READ_MALI_REG(MALI_APB_GP_INT_RAWSTAT) & 0x707bff)
		printk("mali_meson_poweron: Interrupt received.\n");
	else
		printk("mali_meson_poweron: No interrupt received.\n");

	/* force reset GP */
	WRITE_MALI_REG(MALI_APB_GP_CMD, 1 << 5);

	/* stop MMU paging and reset */
	WRITE_MALI_REG(MALI_MMU_CMD, 1);
	WRITE_MALI_REG(MALI_MMU_CMD, 6);

	for (i = 0; i < 100; i++)
		udelay(500);

	WRITE_MALI_REG(MALI_APB_GP_INT_CLEAR, 0x3ff);
	WRITE_MALI_REG(MALI_MMU_INT_CLEAR, INT_ALL);
	WRITE_MALI_REG(MALI_MMU_INT_MASK, 0);

	WRITE_MALI_REG(MALI_APB_GP_INT_CLEAR, 0x707bff);
	WRITE_MALI_REG(MALI_APB_GP_INT_MASK, int_mask);

	spin_unlock_irqrestore(&lock, flags);

	dma_unmap_single(NULL, p_phy, 4096 * 3, DMA_TO_DEVICE);
	kfree((void *)p);

	/* Mali revision detection */
	if (last_power_mode == -1)
		mali_revb_flag = mali_meson_is_revb();
}
void mali_meson_poweron(void)
{
	unsigned long flags;
	u32 p, p_aligned;
	dma_addr_t p_phy;
	int i;
	unsigned int_mask;

	if ((last_power_mode != -1) &&
	    (last_power_mode != MALI_POWER_MODE_DEEP_SLEEP)) {
		return;
	}

	if (READ_MALI_REG(MALI_PP_PP_VERSION) != MALI_PP_PP_VERSION_MAGIC) {
		printk("mali_meson_poweron: Mali APB bus access failed.");
		return;
	}

	if (READ_MALI_REG(MALI_MMU_DTE_ADDR) != 0) {
		printk("mali_meson_poweron: Mali is not really powered off.");
		return;
	}

	p = (u32)kcalloc(4096 * 4, 1, GFP_KERNEL);
	if (!p) {
		printk("mali_meson_poweron: NOMEM in meson_poweron\n");
		return;
	}

	p_aligned = __ALIGN_MASK(p, 4096);

	/* DTE */
	*(u32 *)(p_aligned) =
		(virt_to_phys((void *)p_aligned) + OFFSET_MMU_PTE) |
		MMU_FLAG_DTE_PRESENT;

	/* PTE */
	for (i = 0; i < 1024; i++) {
		*(u32 *)(p_aligned + OFFSET_MMU_PTE + i * 4) =
			(virt_to_phys((void *)p_aligned) +
			 OFFSET_MMU_VIRTUAL_ZERO + 4096 * i) |
			MMU_FLAG_PTE_PAGE_PRESENT |
			MMU_FLAG_PTE_RD_PERMISSION;
	}

	/* command & data */
	memcpy((void *)(p_aligned + OFFSET_MMU_VIRTUAL_ZERO),
	       poweron_data, 4096);

	p_phy = dma_map_single(NULL, (void *)p_aligned, 4096 * 3, DMA_TO_DEVICE);

	/* Set up Mali GP MMU */
	WRITE_MALI_REG(MALI_MMU_DTE_ADDR, p_phy);
	WRITE_MALI_REG(MALI_MMU_CMD, 0);

	if ((READ_MALI_REG(MALI_MMU_STATUS) & 1) != 1) {
		printk("mali_meson_poweron: MMU enabling failed.\n");
	}

	/* Set up Mali command registers */
	WRITE_MALI_REG(MALI_APB_GP_VSCL_START, 0);
	WRITE_MALI_REG(MALI_APB_GP_VSCL_END, 0x38);
	WRITE_MALI_REG(MALI_APB_GP_INT_MASK, 0x3ff);

	spin_lock_irqsave(&lock, flags);
	int_mask = READ_CBUS_REG(A9_0_IRQ_IN1_INTR_MASK);

	/* Set up ARM Mali interrupt */
	WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT_CLR, 1 << 16);
	SET_CBUS_REG_MASK(A9_0_IRQ_IN1_INTR_MASK, 1 << 16);

	/* Start GP */
	WRITE_MALI_REG(MALI_APB_GP_CMD, 1);

	for (i = 0; i < 100; i++)
		udelay(500);

	/* check Mali GP interrupt */
	if (READ_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT) & (1 << 16)) {
		printk("mali_meson_poweron: Interrupt received.\n");
	} else {
		printk("mali_meson_poweron: No interrupt received.\n");
	}

	WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_STAT_CLR, 1 << 16);
	CLEAR_CBUS_REG_MASK(A9_0_IRQ_IN1_INTR_MASK, 1 << 16);

	/* force reset GP */
	WRITE_MALI_REG(MALI_APB_GP_CMD, 1 << 5);

	/* stop MMU paging and reset */
	WRITE_MALI_REG(MALI_MMU_CMD, 1);
	WRITE_MALI_REG(MALI_MMU_CMD, 1 << 6);

	WRITE_CBUS_REG(A9_0_IRQ_IN1_INTR_MASK, int_mask);
	spin_unlock_irqrestore(&lock, flags);

	dma_unmap_single(NULL, p_phy, 4096 * 3, DMA_TO_DEVICE);
	kfree((void *)p);
}
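/*
 * Both poweron variants above align the scratch buffer with
 * __ALIGN_MASK(p, 4096).  Assuming __ALIGN_MASK is the stock kernel macro
 * (((x) + (mask)) & ~(mask)) and not a driver-local definition, the mask
 * argument is conventionally alignment - 1: with a mask of 4096 the macro
 * only adds one page and clears bit 12, while a mask of 4095 rounds up to a
 * 4096-byte boundary.  A quick check of both forms on a sample address:
 */
#include <stdio.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int p = 0x12345678u;

	/* mask = 4096: adds one page, clears only bit 12 -> 0x12346678 */
	printf("mask 4096: 0x%08X\n", __ALIGN_MASK(p, 4096u));

	/* mask = 4095: rounds up to a page boundary -> 0x12346000 */
	printf("mask 4095: 0x%08X\n", __ALIGN_MASK(p, 4096u - 1));
	return 0;
}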
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
					   uint64_t max_addr,
					   uint64_t alignment,
					   char *name,
					   uint32_t flags)
{
	int64_t addr_allocated;
	struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
		     "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
		     (unsigned long long)size,
		     (unsigned long long)min_addr,
		     (unsigned long long)max_addr,
		     (unsigned long long)alignment,
		     name);
#endif
	if (cvmx_bootmem_desc->major_version != 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
			     "%d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		return -1;
	}

	/*
	 * Take lock here, as name lookup/block alloc/name add need to
	 * be atomic.
	 */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));

	/* Get pointer to first available named block descriptor */
	named_block_desc_ptr =
		cvmx_bootmem_phy_named_block_find(NULL,
						  flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);

	/*
	 * Check to see if name already in use, return error if name
	 * not available or no more room for blocks.
	 */
	if (cvmx_bootmem_phy_named_block_find(name,
					      flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) ||
	    !named_block_desc_ptr) {
		if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
			cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
		return -1;
	}

	/*
	 * Round size up to mult of minimum alignment bytes.  We need
	 * the actual size allocated to allow for blocks to be
	 * coalesced when they are freed.  The alloc routine does the
	 * same rounding up on all allocations.
	 */
	size = __ALIGN_MASK(size, (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1));

	addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
						alignment,
						flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
	if (addr_allocated >= 0) {
		named_block_desc_ptr->base_addr = addr_allocated;
		named_block_desc_ptr->size = size;
		strncpy(named_block_desc_ptr->name, name,
			cvmx_bootmem_desc->named_block_name_len);
		named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0;
	}

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
	return addr_allocated;
}
int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
			       uint64_t address_max, uint64_t alignment,
			       uint32_t flags)
{
	uint64_t head_addr;
	uint64_t ent_addr;
	/* points to previous list entry; NULL means current entry is head of list */
	uint64_t prev_addr = 0;
	uint64_t new_ent_addr = 0;
	uint64_t desired_min_addr;

#ifdef DEBUG
	cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
		     "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
		     (unsigned long long)req_size,
		     (unsigned long long)address_min,
		     (unsigned long long)address_max,
		     (unsigned long long)alignment);
#endif

	if (cvmx_bootmem_desc->major_version > 3) {
		cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
			     "version: %d.%d at addr: %p\n",
			     (int)cvmx_bootmem_desc->major_version,
			     (int)cvmx_bootmem_desc->minor_version,
			     cvmx_bootmem_desc);
		goto error_out;
	}

	/*
	 * Do a variety of checks to validate the arguments.  The
	 * allocator code will later assume that these checks have
	 * been made.  We validate that the requested constraints are
	 * not self-contradictory before we look through the list of
	 * available memory.
	 */

	/* 0 is not a valid req_size for this allocator */
	if (!req_size)
		goto error_out;

	/* Round req_size up to mult of minimum alignment bytes */
	req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
		~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);

	/*
	 * Convert !0 address_min and 0 address_max to special case of
	 * range that specifies an exact memory block to allocate.  Do
	 * this before other checks and adjustments so that this
	 * transformation will be validated.
	 */
	if (address_min && !address_max)
		address_max = address_min + req_size;
	else if (!address_min && !address_max)
		address_max = ~0ull;	/* If no limits given, use max limits */

	/*
	 * Enforce minimum alignment (this also keeps the minimum free block
	 * req_size the same as the alignment req_size).
	 */
	if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
		alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;

	/*
	 * Adjust address minimum based on requested alignment (round
	 * up to meet alignment).  Do this here so we can reject
	 * impossible requests up front. (NOP for address_min == 0)
	 */
	if (alignment)
		address_min = __ALIGN_MASK(address_min, (alignment - 1));

	/*
	 * Reject inconsistent args.  We have adjusted these, so this
	 * may fail due to our internal changes even if this check
	 * would pass for the values the user supplied.
	 */
	if (req_size > address_max - address_min)
		goto error_out;

	/* Walk through the list entries - first fit found is returned */

	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_lock();
	head_addr = cvmx_bootmem_desc->head_addr;
	ent_addr = head_addr;
	for (; ent_addr;
	     prev_addr = ent_addr,
	     ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
		uint64_t usable_base, usable_max;
		uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);

		if (cvmx_bootmem_phy_get_next(ent_addr) &&
		    ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
			cvmx_dprintf("Internal bootmem_alloc() error: ent: "
				     "0x%llx, next: 0x%llx\n",
				     (unsigned long long)ent_addr,
				     (unsigned long long)
				     cvmx_bootmem_phy_get_next(ent_addr));
			goto error_out;
		}

		/*
		 * Determine if this is an entry that can satisfy the
		 * request.  Check to make sure entry is large enough
		 * to satisfy request.
		 */
		usable_base = __ALIGN_MASK(max(address_min, ent_addr),
					   alignment - 1);
		usable_max = min(address_max, ent_addr + ent_size);
		/*
		 * We should be able to allocate block at address
		 * usable_base.
		 */

		desired_min_addr = usable_base;
		/*
		 * Determine if request can be satisfied from the
		 * current entry.
		 */
		if (!((ent_addr + ent_size) > usable_base &&
		      ent_addr < address_max &&
		      req_size <= usable_max - usable_base))
			continue;

		/*
		 * We have found an entry that has room to satisfy the
		 * request, so allocate it from this entry.  If end
		 * CVMX_BOOTMEM_FLAG_END_ALLOC set, then allocate from
		 * the end of this block rather than the beginning.
		 */
		if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
			desired_min_addr = usable_max - req_size;
			/*
			 * Align desired address down to required
			 * alignment.
			 */
			desired_min_addr &= ~(alignment - 1);
		}

		/* Match at start of entry */
		if (desired_min_addr == ent_addr) {
			if (req_size < ent_size) {
				/*
				 * big enough to create a new block
				 * from top portion of block.
				 */
				new_ent_addr = ent_addr + req_size;
				cvmx_bootmem_phy_set_next(new_ent_addr,
					cvmx_bootmem_phy_get_next(ent_addr));
				cvmx_bootmem_phy_set_size(new_ent_addr,
					ent_size - req_size);

				/*
				 * Adjust next pointer as following
				 * code uses this.
				 */
				cvmx_bootmem_phy_set_next(ent_addr,
							  new_ent_addr);
			}

			/*
			 * adjust prev ptr or head to remove this
			 * entry from list.
			 */
			if (prev_addr)
				cvmx_bootmem_phy_set_next(prev_addr,
					cvmx_bootmem_phy_get_next(ent_addr));
			else
				/*
				 * head of list being returned, so
				 * update head ptr.
				 */
				cvmx_bootmem_desc->head_addr =
					cvmx_bootmem_phy_get_next(ent_addr);

			if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
				cvmx_bootmem_unlock();
			return desired_min_addr;
		}
		/*
		 * block returned doesn't start at beginning of entry,
		 * so we know that we will be splitting a block off
		 * the front of this one.  Create a new block from the
		 * beginning, add to list, and go to top of loop
		 * again.
		 *
		 * create new block from high portion of
		 * block, so that top block starts at desired
		 * addr.
		 */
		new_ent_addr = desired_min_addr;
		cvmx_bootmem_phy_set_next(new_ent_addr,
					  cvmx_bootmem_phy_get_next(ent_addr));
		cvmx_bootmem_phy_set_size(new_ent_addr,
					  cvmx_bootmem_phy_get_size(ent_addr) -
					  (desired_min_addr - ent_addr));
		cvmx_bootmem_phy_set_size(ent_addr,
					  desired_min_addr - ent_addr);
		cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
		/* Loop again to handle actual alloc from new block */
	}
error_out:
	/* We didn't find anything, so return error */
	if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
		cvmx_bootmem_unlock();
	return -1;
}
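/*
 * A standalone sketch of the first-fit test above: the candidate address is
 * aligned up inside the free-list entry, the usable ceiling is the lower of
 * the entry end and the caller's address_max, and the request fits only if
 * req_size still fits between the two.  The macro mirrors the stock kernel
 * __ALIGN_MASK; the entry/request numbers and the max_u64/min_u64 helpers
 * are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

static inline uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static inline uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical free-list entry and request */
	uint64_t ent_addr = 0x00100080, ent_size = 0x8000;
	uint64_t address_min = 0, address_max = ~0ull;
	uint64_t req_size = 0x2000, alignment = 0x1000;

	/* lowest aligned address usable inside this entry */
	uint64_t usable_base = __ALIGN_MASK(max_u64(address_min, ent_addr),
					    alignment - 1);
	/* highest usable address (entry end or caller's limit) */
	uint64_t usable_max = min_u64(address_max, ent_addr + ent_size);

	int fits = (ent_addr + ent_size) > usable_base &&
		   ent_addr < address_max &&
		   req_size <= usable_max - usable_base;

	printf("usable_base=0x%llx usable_max=0x%llx fits=%d\n",
	       (unsigned long long)usable_base,
	       (unsigned long long)usable_max, fits);
	/* prints usable_base=0x101000 usable_max=0x108080 fits=1 */
	return 0;
}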