/**
 * Initialize a range allocator: record the element count and mark
 * every element as available.
 *
 * @param range_addr  base address of the range bookkeeping structure
 * @param size        number of elements the range manages
 *
 * @return 0 (always succeeds)
 */
int cvmx_range_init(uint64_t range_addr, int size)
{
	uint64_t count = size;
	uint64_t idx;

	cvmx_write64_uint64(addr_of_size(range_addr), count);
	for (idx = 0; idx < count; idx++)
		cvmx_write64_uint64(addr_of_element(range_addr, idx),
				    CVMX_RANGE_AVAILABLE);
	return 0;
}
int cvmx_range_alloc_non_contiguos(uint64_t range_addr, uint64_t owner, uint64_t cnt, int elements[]) { uint64_t i=0, size; uint64_t element_index = 0; size = cvmx_read64_uint64(addr_of_size(range_addr)); for(i=0; i<size; i++) { uint64_t r_owner = cvmx_read64_uint64(addr_of_element(range_addr,i)); //cvmx_dprintf("index=%d owner=%llx\n", (int) i, (unsigned long long) r_owner); if (r_owner == CVMX_RANGE_AVAILABLE) { elements[element_index++] = (int) i; } if (element_index == cnt) break; } if (element_index != cnt) { cvmx_dprintf("ERROR: failed to allocate non contiguos cnt=%d" " available=%d\n", (int)cnt, (int) element_index); return -1; } for(i=0; i<cnt; i++) { uint64_t a = addr_of_element(range_addr,elements[i]); cvmx_write64_uint64(a, owner); } return 0; }
int cvmx_range_free_with_base(uint64_t range_addr, int base, int cnt) { uint64_t i, size; uint64_t up = base + cnt; size = cvmx_read64_uint64(addr_of_size(range_addr)); if (up >= size) { cvmx_dprintf("ERROR: invalid base or cnt size=%d base+cnt=%d \n", (int) size, (int)up); return -1; } for(i=base; i<up; i++) { cvmx_write64_uint64(addr_of_element(range_addr,i), CVMX_RANGE_AVAILABLE); } return 0; }
/**
 * Free a scattered set of elements given by their indices in @bases.
 *
 * @param range_addr  base address of the range bookkeeping structure
 * @param bases       array of element indices to free
 * @param count       number of entries in @bases
 *
 * @return 0 on success, -1 if any listed element is not currently
 *         allocated (in which case nothing is freed)
 */
int cvmx_range_free_mutiple(uint64_t range_addr, int bases[], int count)
{
	uint64_t n = count;
	uint64_t idx;

	/* Refuse to free anything unless every listed element is
	 * currently allocated. */
	if (__cvmx_range_is_allocated(range_addr, bases, count) != 1)
		return -1;

	for (idx = 0; idx < n; idx++)
		cvmx_write64_uint64(addr_of_element(range_addr, bases[idx]),
				    CVMX_RANGE_AVAILABLE);
	return 0;
}
/**
 * Free every element currently tagged with @owner.
 *
 * @param range_addr  base address of the range bookkeeping structure
 * @param owner       owner tag whose elements should be released
 *
 * @return 0 if at least one element was freed, -1 if @owner held none
 */
int cvmx_range_free_with_owner(uint64_t range_addr, uint64_t owner)
{
	uint64_t size = cvmx_read64_uint64(addr_of_size(range_addr));
	int rc = -1;
	uint64_t idx;

	for (idx = 0; idx < size; idx++) {
		uint64_t a = addr_of_element(range_addr, idx);

		if (cvmx_read64_uint64(a) == owner) {
			cvmx_write64_uint64(a, CVMX_RANGE_AVAILABLE);
			rc = 0;
		}
	}
	return rc;
}
int cvmx_range_alloc (uint64_t range_addr, uint64_t owner, uint64_t cnt, int align) { uint64_t i=0, size; int64_t first_available; //cvmx_dprintf("%s: range_addr=%llx owner=%llx cnt=%d \n", __FUNCTION__, // (unsigned long long) range_addr, (unsigned long long) owner, (int)cnt); size = cvmx_read64_uint64(addr_of_size(range_addr)); //cvmx_dprintf("%s: size=%d\n", __FUNCTION__, size); while(i<size) { uint64_t available_cnt=0; first_available = cvmx_range_find_next_available(range_addr, i, align); if (first_available == -1) return -1; i = first_available; //cvmx_dprintf("%s: first_available=%d \n", __FUNCTION__, (int) first_available); while((available_cnt != cnt) && (i < size)) { uint64_t r_owner = cvmx_read64_uint64(addr_of_element(range_addr,i)); if (r_owner == CVMX_RANGE_AVAILABLE) available_cnt++; i++; } if (available_cnt == cnt) { //cvmx_dprintf("%s: first_available=%d available=%d \n", __FUNCTION__, // (int) first_available, (int) available_cnt); uint64_t j; for(j=first_available; j < first_available + cnt; j++) { uint64_t a = addr_of_element(range_addr,j); //cvmx_dprintf("%s: j=%d a=%llx \n", __FUNCTION__, (int) j, (unsigned long long) a); cvmx_write64_uint64(a, owner); } return first_available; } } cvmx_dprintf("ERROR: failed to allocate range cnt=%d \n", (int)cnt); cvmx_range_show(range_addr); return -1; }
int cvmx_range_reserve(uint64_t range_addr, uint64_t owner, uint64_t base, uint64_t cnt ) { uint64_t i, size, r_owner; uint64_t up = base + cnt; size = cvmx_read64_uint64(addr_of_size(range_addr)); if (up >= size) { cvmx_dprintf("ERROR: invalid base or cnt size=%d base+cnt=%d \n", (int) size, (int)up); return -1; } for(i=base; i<up; i++) { r_owner = cvmx_read64_uint64(addr_of_element(range_addr,i)); //cvmx_dprintf("%d: %llx\n", (int) i,(unsigned long long) r_owner); if (r_owner != CVMX_RANGE_AVAILABLE) { cvmx_dprintf("ERROR: failed to reserve base+cnt=%d \n", (int)i); cvmx_range_show(range_addr); return -1; } } for(i=base; i<up; i++) { cvmx_write64_uint64(addr_of_element(range_addr,i), owner); } return base; }
/* Write the "next" link field of the bootmem descriptor at @addr.
 * Bit 63 is forced on in the target address before the 64-bit write
 * (presumably to select the uncached/physical address segment —
 * confirm against the surrounding bootmem code). */
static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
{
	uint64_t field_addr = (addr + NEXT_OFFSET) | (1ull << 63);

	cvmx_write64_uint64(field_addr, next);
}
/* Write the "size" field of the bootmem descriptor at @addr.
 * Bit 63 is forced on in the target address before the 64-bit write
 * (presumably to select the uncached/physical address segment —
 * confirm against the surrounding bootmem code). */
static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
{
	uint64_t field_addr = (addr + SIZE_OFFSET) | (1ull << 63);

	cvmx_write64_uint64(field_addr, size);
}
void init_octeon_pcie(void) { int first_busno; int i; int rc = 0; int node = cvmx_get_node_num(); struct pci_controller *hose; int pcie_port; first_busno = OCTEON_FIRST_PCIE_BUSNO; memset(&hose_pcie[0], 0, sizeof(hose_pcie[0]) * num_pcie_ports); debug("Starting PCIE on node %d\n", node); for (i = 0; i < num_pcie_ports; i++) { pcie_port = ((node << 4) | i); rc = cvmx_pcie_rc_initialize(pcie_port); if (rc != 0) continue; mdelay(1000); /* Should delay 1 second according to standard */ hose = &hose_pcie[i]; hose->priv_data = (void *)&oct_pcie_data[i]; oct_pcie_data[i].pcie_port = pcie_port;; hose->config_table = pci_board_config_table; hose->first_busno = first_busno; hose->last_busno = 0xff; /* PCI I/O space (Sub-DID == 2) */ pci_set_region(hose->regions + 0, octeon_pcie_region_info[i].io_base, octeon_pcie_region_info[i].io_base, octeon_pcie_region_info[i].io_size, PCI_REGION_IO); /* PCI memory space (Sub-DID == 3) */ pci_set_region(hose->regions + 1, octeon_pcie_region_info[i].mem_base, octeon_pcie_region_info[i].mem_base, octeon_pcie_region_info[i].mem_size, PCI_REGION_MEM); hose->region_count = 2; pci_set_ops(hose, octeon_pcie_read_config_byte, octeon_pcie_read_config_word, octeon_pcie_read_config_dword, octeon_pcie_write_config_byte, octeon_pcie_write_config_word, octeon_pcie_write_config_dword); pci_register_hose(hose); hose->last_busno = pci_hose_scan(hose); debug("PCIe: port=%d, first_bus=%d, last_bus=%d,\n\t" "mem_base=0x%x, mem_size=0x%x, io_base=0x%x, io_size=0x%x\n", octeon_get_pcie_port(hose), hose->first_busno, hose->last_busno, octeon_pcie_region_info[i].mem_base, octeon_pcie_region_info[i].mem_size, octeon_pcie_region_info[i].io_base, octeon_pcie_region_info[i].io_size); first_busno = hose->last_busno + 1; #if CONFIG_OCTEON_PCI_ENABLE_32BIT_MAPPING if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { cvmx_pemx_bar_ctl_t bar_ctl; cvmx_pemx_bar1_indexx_t pemx_bar1_indexx; uint64_t bar_base; int j; /* Setup BAR1 to map bus address 0x0 to the base of * 
u-boot's TLB mapping. This allows us to have u-boot * located anywhere in memory (including above 32 bit * addressable space) and still have 32 bit PCI devices * have access to memory that is statically allocated * or malloced by u-boot, both of which are TLB mapped. */ cvmx_write_csr_node(node, CVMX_PEMX_P2N_BAR1_START(i), 0); /* Disable bar0/bar2, as we are not using them here */ cvmx_write_csr_node(node, CVMX_PEMX_P2N_BAR0_START(i), -1); cvmx_write_csr_node(node, CVMX_PEMX_P2N_BAR2_START(i), -1); /* Select 64 MByte mapping size for bar 1 on * all ports */ bar_ctl.u64 = cvmx_read_csr_node(node, CVMX_PEMX_BAR_CTL(i)); bar_ctl.s.bar1_siz = 1; /* 64MB */ bar_ctl.s.bar2_enb = 0; cvmx_write_csr_node(node, CVMX_PEMX_BAR_CTL(i), bar_ctl.u64); /* Configure the regions in bar 1 to map to the * DRAM used by u-boot. */ /* Round down to 4MByte boundary to meet BAR mapping * requirements */ bar_base = gd->bd->bi_uboot_ram_addr & ~0x3fffffull; debug("pcie: port %d, setting BAR base to 0x%llx\n", i, bar_base); pemx_bar1_indexx.u64 = 0; pemx_bar1_indexx.s.addr_v = 1; pemx_bar1_indexx.s.end_swp = 1; pemx_bar1_indexx.s.ca = 1; for (j = 0; j < 16; j++) { pemx_bar1_indexx.s.addr_idx = (bar_base + 4 * 1024 * 1024 * j) >> 22; cvmx_write64_uint64(CVMX_PEMX_BAR1_INDEXX(j, i), pemx_bar1_indexx.u64); } } #endif /* CONFIG_OCTEON_PCI_ENABLE_32BIT_MAPPING */ }