/*
 * car_stage_c_entry - romstage C entry point, still executing out of
 * cache-as-RAM (Intel Quark, FSP flow).
 *
 * Brings up the console, trains DRAM through the FSP, then builds the
 * postcar frame: a small stack plus the set of MTRRs describing which
 * regions must stay cached once postcar tears CAR down.  The trailing
 * "return NULL" is notional -- run_postcar_phase() does not return.
 */
asmlinkage void *car_stage_c_entry(void)
{
	struct postcar_frame pcf;
	bool s3wake;
	uintptr_t top_of_ram;
	uintptr_t top_of_low_usable_memory;

	post_code(0x20);
	console_init();

	/* Initialize DRAM; S3 resume path is detected from the power state */
	s3wake = fill_power_state() == ACPI_S3;
	fsp_memory_init(s3wake);

	/* Disable the ROM shadow 0x000e0000 - 0x000fffff */
	disable_rom_shadow();

	/* Initialize the PCIe bridges */
	pcie_init();

	/* 1 KiB of stack for the postcar trampoline */
	if (postcar_frame_init(&pcf, 1*KiB))
		die("Unable to initialize postcar frame.\n");

	/* Locate the top of RAM */
	top_of_low_usable_memory = (uintptr_t) cbmem_top();
	/*
	 * Align to a 16 MiB boundary so the ramstage window below can be
	 * covered by a single MTRR (MTRR bases/sizes must be power-of-two
	 * aligned).  NOTE(review): assumes ALIGN() rounds up -- confirm
	 * against this tree's definition.
	 */
	top_of_ram = ALIGN(top_of_low_usable_memory, 16 * MiB);

	/* Cache postcar and ramstage: 16 MiB window ending at top_of_ram */
	postcar_frame_add_mtrr(&pcf, top_of_ram - (16 * MiB), 16 * MiB,
		MTRR_TYPE_WRBACK);

	/* Cache RMU area (64 KiB at the top of low usable memory) */
	postcar_frame_add_mtrr(&pcf, (uintptr_t) top_of_low_usable_memory,
		0x10000, MTRR_TYPE_WRTHROUGH);

	/* Cache ESRAM (512 KiB at 2 GiB) */
	postcar_frame_add_mtrr(&pcf, 0x80000000, 0x80000, MTRR_TYPE_WRBACK);

	/*
	 * Cache SPI flash - Write protect not supported.  The flash is
	 * mapped so that it ends at 4 GiB, hence base = -CONFIG_ROM_SIZE.
	 */
	postcar_frame_add_mtrr(&pcf, (uint32_t)(-CONFIG_ROM_SIZE),
		CONFIG_ROM_SIZE, MTRR_TYPE_WRTHROUGH);

	run_postcar_phase(&pcf);
	return NULL;
}
/*
 * hw_init - one-time bring-up of the SoC peripheral blocks.
 *
 * Each init call is followed by a BIOS_INFO log line so the boot log
 * shows exactly how far bring-up got if a later step hangs.
 * NOTE(review): ordering dependencies between blocks (e.g. whether CCU
 * must follow PCIe) are not visible from this function -- confirm
 * before reordering any of these calls.
 */
void hw_init(void)
{
	tz_init();
	printk(BIOS_INFO, "trustzone initialized\n");
	dmac_init();
	printk(BIOS_INFO, "PL330 DMAC initialized\n");
	lcd_init();
	/* QoS level 15; the meaning of the value is not visible here */
	lcd_qos_init(15);
	printk(BIOS_INFO, "LCD initialized\n");
	v3d_init();
	printk(BIOS_INFO, "V3D initialized\n");
	audio_init();
	printk(BIOS_INFO, "audio initialized\n");
	neon_init();
	printk(BIOS_INFO, "neon initialized\n");
	pcie_init();
	printk(BIOS_INFO, "PCIe initialized\n");
	M0_init();
	printk(BIOS_INFO, "M0 initialized\n");
	ccu_init();
	printk(BIOS_INFO, "CCU initialized\n");
	sdio_init();
	printk(BIOS_INFO, "SDIO initialized\n");
}
/*
 * px_attach - DDI attach entry point for the px (PCI Express root
 * nexus) driver.
 *
 * DDI_ATTACH: allocates per-instance soft state, then initializes the
 * debug, bus, interrupt-block, MMU, MSIQ, MSI, PEC, DMA and FMA modules
 * in order, enables error interrupts, sets up native hotplug and power
 * management, and finally populates the fabric bus_t structures.  On
 * any failure the goto-label chain below unwinds exactly the modules
 * already attached.
 *
 * DDI_RESUME: resumes MSIQ and the px library layer, but only if the
 * instance was actually suspended.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 *
 * Fixes relative to the previous revision:
 *  - "&regops" had been corrupted into the sequence "(R)ops" by an
 *    HTML-entity mangling (&reg -> the registered-sign character);
 *    restored in both the px_lib_hotplug_init() and pcie_init() calls.
 *  - The DDI_RESUME "instance NOT suspended" path broke out of the
 *    switch with px_p->px_mutex still held; the mutex is now released.
 *  - The inner pcie_bus_t declaration shadowed the outer bus_p.
 */
/*ARGSUSED*/
static int
px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	px_t *px_p;	/* per bus state pointer */
	int instance = DIP_TO_INST(dip);
	int ret = DDI_SUCCESS;
	devhandle_t dev_hdl = NULL;
	pcie_hp_regops_t regops;
	pcie_bus_t *bus_p;

	switch (cmd) {
	case DDI_ATTACH:
		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/* See pci_cfgacc.c */
		pci_cfgacc_acc_p = pci_cfgacc_acc;

		/*
		 * Allocate and get the per-px soft state structure.
		 */
		if (ddi_soft_state_zalloc(px_state_p, instance)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate px state",
			    ddi_driver_name(dip), instance);
			goto err_bad_px_softstate;
		}
		px_p = INST_TO_STATE(instance);
		px_p->px_dip = dip;
		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
		px_p->px_soft_state = PCI_SOFT_STATE_CLOSED;

		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pciex");

		/* Initialize px_dbg for high pil printing */
		px_dbg_attach(dip, &px_p->px_dbg_hdl);
		pcie_rc_init_bus(dip);

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (px_get_props(px_p, dip) == DDI_FAILURE)
			goto err_bad_px_prop;

		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
			goto err_bad_dev_init;

		/* Initialize device handle */
		px_p->px_dev_hdl = dev_hdl;

		/* Cache the BDF of the root port nexus */
		px_p->px_bdf = px_lib_get_bdf(px_p);

		/*
		 * Initialize interrupt block.  Note that this
		 * initializes error handling for the PEC as well.
		 */
		if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_ib;

		if (px_cb_attach(px_p) != DDI_SUCCESS)
			goto err_bad_cb;

		/*
		 * Start creating the modules.
		 * Note that attach() routines should
		 * register and enable their own interrupts.
		 */

		if ((px_mmu_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_mmu;

		if ((px_msiq_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msiq;

		if ((px_msi_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msi;

		if ((px_pec_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_pec;

		if ((px_dma_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_dma; /* nothing to uninitialize on DMA */

		if ((px_fm_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_dma;

		/*
		 * All of the error handlers have been registered
		 * by now so it's time to activate all the interrupts.
		 */
		if ((px_enable_err_intr(px_p)) != DDI_SUCCESS)
			goto err_bad_intr;

		if (px_lib_hotplug_init(dip, (void *)&regops) ==
		    DDI_SUCCESS) {
			bus_p = PCIE_DIP2BUS(dip);
			bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;
		}

		(void) px_set_mps(px_p);

		if (pcie_init(dip, (caddr_t)&regops) != DDI_SUCCESS)
			goto err_bad_hotplug;

		(void) pcie_hpintr_enable(dip);

		if (pxtool_init(dip) != DDI_SUCCESS)
			goto err_bad_pcitool_node;

		/*
		 * power management setup.  Even if it fails, attach will
		 * succeed as this is an optional feature.  Since we are
		 * always at full power, this is not critical.
		 */
		if (pwr_common_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "pwr_common_setup failed\n");
		} else if (px_pwr_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "px_pwr_setup failed \n");
			pwr_common_teardown(dip);
		}

		/*
		 * add cpr callback
		 */
		px_cpr_add_callb(px_p);

		/*
		 * do fabric sync in case we don't need to wait for
		 * any bridge driver to be ready
		 */
		(void) px_lib_fabric_sync(dip);

		ddi_report_dev(dip);

		px_p->px_state = PX_ATTACHED;

		/*
		 * save base addr in bus_t for pci_cfgacc_xxx(); this
		 * depends on the px structure being properly initialized.
		 */
		bus_p = PCIE_DIP2BUS(dip);
		bus_p->bus_cfgacc_base = px_lib_get_cfgacc_base(dip);

		/*
		 * Populate bus_t for all devices in this fabric, after FMA
		 * is initialized, so that config access errors can
		 * trigger panic.
		 */
		pcie_fab_init_bus(dip, PCIE_BUS_ALL);

		DBG(DBG_ATTACH, dip, "attach success\n");
		break;

err_bad_pcitool_node:
		(void) pcie_hpintr_disable(dip);
		(void) pcie_uninit(dip);
err_bad_hotplug:
		(void) px_lib_hotplug_uninit(dip);
		px_disable_err_intr(px_p);
err_bad_intr:
		px_fm_detach(px_p);
err_bad_dma:
		px_pec_detach(px_p);
err_bad_pec:
		px_msi_detach(px_p);
err_bad_msi:
		px_msiq_detach(px_p);
err_bad_msiq:
		px_mmu_detach(px_p);
err_bad_mmu:
err_bad_cb:
		/*
		 * NOTE(review): both labels fall through to px_ib_detach();
		 * confirm that px_cb_attach() needs no explicit undo here.
		 */
		px_ib_detach(px_p);
err_bad_ib:
		if (px_lib_dev_fini(dip) != DDI_SUCCESS) {
			DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n");
		}
err_bad_dev_init:
		px_free_props(px_p);
err_bad_px_prop:
		pcie_rc_fini_bus(dip);
		px_dbg_detach(dip, &px_p->px_dbg_hdl);
		mutex_destroy(&px_p->px_mutex);
		ddi_soft_state_free(px_state_p, instance);
err_bad_px_softstate:
		ret = DDI_FAILURE;
		break;

	case DDI_RESUME:
		DBG(DBG_ATTACH, dip, "DDI_RESUME\n");

		px_p = INST_TO_STATE(instance);

		mutex_enter(&px_p->px_mutex);

		/* suspend might have not succeeded */
		if (px_p->px_state != PX_SUSPENDED) {
			DBG(DBG_ATTACH, px_p->px_dip,
			    "instance NOT suspended\n");
			ret = DDI_FAILURE;
			/* FIX: release the mutex before leaving the switch */
			mutex_exit(&px_p->px_mutex);
			break;
		}

		px_msiq_resume(px_p);
		px_lib_resume(dip);
		(void) pcie_pwr_resume(dip);
		px_p->px_state = PX_ATTACHED;

		mutex_exit(&px_p->px_mutex);
		break;

	default:
		DBG(DBG_ATTACH, dip, "unsupported attach op\n");
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}
/*
 * Host-side launcher: parses -c options into a bitmask of clusters to
 * boot, brings up the PCIe ethernet interface and the RPC server, then
 * (when running on the IO cluster, id 128) spawns the l2fwd binary on
 * each selected cluster and waits for them all to exit.
 *
 * Fix: the cluster id from "-c" is now range-checked before shifting.
 * The previous "1 << atoi(optarg)" was undefined behavior for ids >= 31
 * (or negative), and ids 16..30 silently corrupted the 16-bit mask.
 *
 * Returns 0 on success, -1 on bad arguments; exits non-zero on
 * initialization failure.
 */
int main(int argc, char *const argv[])
{
	int ret;
	unsigned clusters = 0;
	unsigned n_clusters = 0;
	int opt;

	while ((opt = getopt(argc, argv, "c:h")) != -1) {
		switch (opt) {
		case 'c':
		{
			/* Only 16 compute clusters exist (ids 0..15) */
			int clus_id = atoi(optarg);
			unsigned mask;

			if (clus_id < 0 || clus_id > 15) {
				fprintf(stderr,
				    "Invalid cluster id '%s'\n", optarg);
				return -1;
			}
			mask = 1u << clus_id;
			if ((clusters & mask) == 0)
				n_clusters++;
			clusters |= mask;
		}
			break;
		case 'h':
			printf("Usage: %s [ -c <clus_id> -c <clus_id> -c ... ]", argv[0]);
			exit(0);
			break;
		default: /* '?' */
			fprintf(stderr, "Wrong arguments\n");
			return -1;
		}
	}

	/* No -c given: boot all 16 clusters */
	if (clusters == 0) {
		clusters = 0xffff;
		n_clusters = 16;
	}

	ret = pcie_init(MPPA_PCIE_ETH_IF_MAX, 0);
	if (ret != 0) {
		fprintf(stderr, "Failed to initialize PCIe eth interface\n");
		exit(1);
	}

	ret = odp_rpc_server_start();
	if (ret) {
		fprintf(stderr, "[RPC] Error: Failed to start server\n");
		exit(EXIT_FAILURE);
	}

	/* Only the IO cluster (id 128) spawns the compute clusters */
	if (__k1_get_cluster_id() == 128) {
		printf("Spawning clusters\n");
		{
			static char const * _argv[] = {
				"odp_l2fwd.kelf",
				"-i", "p0p0:tags=120,p1p0:tags=120",
				"-m", "0",
				"-s", "0",
				"-c", "10",
				NULL
			};
			/* Pop set bits one at a time, lowest id first */
			while (clusters) {
				int clus_id = __builtin_k1_ctz(clusters);

				clusters &= ~(1u << clus_id);
				boot_cluster(clus_id, _argv[0], _argv);
			}
		}
		printf("Cluster booted\n");
	}
	join_clusters(NULL);
	return 0;
}
/*
 * ppb_attach - DDI attach entry point for the PCI-to-PCI bridge nexus
 * driver (sun4u variant with power management support).
 *
 * DDI_ATTACH: creates per-instance soft state, reads bridge config
 * registers (holding power up while doing so on PM-capable bridges),
 * creates the "ranges" property if absent, determines whether the
 * bridge hangs off a PCI Express fabric, and initializes hotplug and
 * FMA support.  DDI_RESUME: restores power state.
 *
 * Fix: the hotplug-initialization "else" previously bound to the inner
 * pcie_init() test (dangling else), so bridges NOT under a PCIe fabric
 * never ran ppb_init_hotplug().  The outer test is now braced so PCIe
 * fabrics use pcie_init() and everything else falls back to
 * ppb_init_hotplug().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
/*ARGSUSED*/
static int
ppb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	dev_info_t *root = ddi_root_node();
	int instance;
	ppb_devstate_t *ppb;
	dev_info_t *pdip;
	ddi_acc_handle_t config_handle;
	char *bus;

	switch (cmd) {
	case DDI_ATTACH:

		/*
		 * Make sure the "device_type" property exists.
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
		    "device_type", "pci");

		/*
		 * Allocate and get soft state structure.
		 */
		instance = ddi_get_instance(devi);
		if (ddi_soft_state_zalloc(ppb_state, instance) != DDI_SUCCESS)
			return (DDI_FAILURE);
		ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
		    instance);
		ppb->dip = devi;
		mutex_init(&ppb->ppb_mutex, NULL, MUTEX_DRIVER, NULL);
		ppb->ppb_soft_state = PCI_SOFT_STATE_CLOSED;
		if (pci_config_setup(devi, &config_handle) != DDI_SUCCESS) {
			mutex_destroy(&ppb->ppb_mutex);
			ddi_soft_state_free(ppb_state, instance);
			return (DDI_FAILURE);
		}
		ppb_pwr_setup(ppb, devi);

		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			mutex_enter(&ppb->ppb_pwr_p->pwr_mutex);

			/*
			 * Before reading config registers, make sure
			 * power is on, and remains on.
			 */
			ppb->ppb_pwr_p->pwr_fp++;

			pci_pwr_change(ppb->ppb_pwr_p,
			    ppb->ppb_pwr_p->current_lvl,
			    pci_pwr_new_lvl(ppb->ppb_pwr_p));
		}

		ppb->ppb_cache_line_size =
		    pci_config_get8(config_handle, PCI_CONF_CACHE_LINESZ);
		ppb->ppb_latency_timer =
		    pci_config_get8(config_handle, PCI_CONF_LATENCY_TIMER);

		/*
		 * Check whether the "ranges" property is present.
		 * Otherwise create the ranges property by reading
		 * the configuration registers.
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "ranges") == 0) {
			ppb_create_ranges_prop(devi, config_handle);
		}

		pci_config_teardown(&config_handle);

		/* Drop the power hold taken above */
		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			ppb->ppb_pwr_p->pwr_fp--;

			pci_pwr_change(ppb->ppb_pwr_p,
			    ppb->ppb_pwr_p->current_lvl,
			    pci_pwr_new_lvl(ppb->ppb_pwr_p));

			mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
		}

		/*
		 * Walk up the tree to determine whether this bridge
		 * hangs off a PCI Express fabric.
		 */
		ppb->parent_bus = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
		for (pdip = ddi_get_parent(ppb->dip); pdip && (pdip != root) &&
		    (ppb->parent_bus != PCIE_PCIECAP_DEV_TYPE_PCIE_DEV);
		    pdip = ddi_get_parent(pdip)) {
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
			    DDI_PROP_DONTPASS, "device_type", &bus) !=
			    DDI_PROP_SUCCESS)
				break;

			if (strcmp(bus, "pciex") == 0)
				ppb->parent_bus =
				    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV;

			ddi_prop_free(bus);
		}

		/*
		 * Initialize hotplug support on this bus.
		 */
		if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
			if (pcie_init(devi, NULL) != DDI_SUCCESS) {
				(void) ppb_detach(devi, DDI_DETACH);
				return (DDI_FAILURE);
			}
		} else {
			ppb_init_hotplug(ppb);
		}

		DEBUG1(DBG_ATTACH, devi,
		    "ppb_attach(): this nexus %s hotplug slots\n",
		    ppb->hotplug_capable == B_TRUE ? "has":"has no");

		ppb_fm_init(ppb);
		ddi_report_dev(devi);

		return (DDI_SUCCESS);

	case DDI_RESUME:
		/*
		 * Get the soft state structure for the bridge.
		 */
		ppb = (ppb_devstate_t *)
		    ddi_get_soft_state(ppb_state, ddi_get_instance(devi));

		pci_pwr_resume(devi, ppb->ppb_pwr_p);

		return (DDI_SUCCESS);

	default:
		break;
	}
	return (DDI_FAILURE);
}
/*
 * Host-side launcher for the l2fwd benchmark: boots 1..14 forwarding
 * clusters plus two traffic-generator clusters (14 and 15, one per
 * ethernet port), after bringing up the PCIe ethernet interface and
 * the RPC server.  Only the IO cluster (id 128) spawns jobs; everyone
 * waits in join_clusters().
 */
int main(int argc, char *const argv[])
{
	unsigned n_clusters = 1;
	unsigned clus;
	int rc;
	int opt;

	static char const *fwd_argv[] = {
		"odp_l2fwd.kelf",
		"-i", "e0:tags=120:min_payload=48:max_payload=48,e1:tags=120:min_payload=48:max_payload=48",
		"-m", "0",
		"-s", "0",
		"-a", "2",
		//"-S",
		//"-t", "30",
		"-c", "8",
		NULL
	};
	static char const *gen0_argv[] = {
		"odp_generator.kelf",
		"-I", "e0:nofree",		/* generate traffic on eth0 */
		"--srcmac", "08:00:27:76:b5:e0",
		"--dstmac", "00:00:00:00:80:01",
		"--srcip", "192.168.111.2",
		"--dstip", "192.168.222.2",
		"-m", "u",			/* UDP mode */
		"-i", "0",			/* interval between sends */
		"-w", "1",			/* workers per cluster */
		"-P", "64",			/* total packet length 64B */
		NULL
	};
	static char const *gen1_argv[] = {
		"odp_generator.kelf",
		"-I", "e1:nofree",		/* generate traffic on eth1 */
		"--srcmac", "08:00:27:76:b5:e1",
		"--dstmac", "00:00:00:00:80:01",
		"--srcip", "192.168.111.2",
		"--dstip", "192.168.222.2",
		"-m", "u",			/* UDP mode */
		"-i", "0",			/* interval between sends */
		"-w", "1",			/* workers per cluster */
		"-P", "64",			/* total packet length 64B */
		NULL
	};

	while ((opt = getopt(argc, argv, "c:h")) != -1) {
		switch (opt) {
		case 'c':
			n_clusters = atoi(optarg);
			break;
		case 'h':
			printf("Usage: %s [ -c <n_clusters> (number of l2fwd clusters, 1..14) ]", argv[0]);
			exit(0);
			break;
		default: /* '?' */
			fprintf(stderr, "Wrong arguments\n");
			return -1;
		}
	}

	/* Clamp the requested forwarding-cluster count into 1..14 */
	if (!n_clusters)
		n_clusters = 1;
	if (n_clusters > 14)
		n_clusters = 14;

	rc = pcie_init(MPPA_PCIE_ETH_IF_MAX, 0);
	if (rc != 0) {
		fprintf(stderr, "Failed to initialize PCIe eth interface\n");
		exit(1);
	}

	rc = odp_rpc_server_start();
	if (rc) {
		fprintf(stderr, "[RPC] Error: Failed to start server\n");
		exit(EXIT_FAILURE);
	}

	if (__k1_get_cluster_id() == 128) {
		printf("Spawning clusters\n");

		/* Forwarding clusters first, then the two generators */
		for (clus = 0; clus < n_clusters; clus++)
			boot_cluster(clus, fwd_argv[0], fwd_argv);

		boot_cluster(14, gen0_argv[0], gen0_argv);
		boot_cluster(15, gen1_argv[0], gen1_argv);

		printf("Clusters booted\n");
	}

	join_clusters(NULL);
	return 0;
}
/*
 * ppb_attach - DDI attach entry point for the x86 PCI-to-PCI bridge
 * nexus driver (FMA-aware variant).
 *
 * DDI_ATTACH: creates per-instance soft state, initializes FMA (with
 * ereports suppressed for immediate children of npe), registers the
 * error callback, reads bridge config space, determines whether this
 * bridge hangs off a PCI Express fabric, optionally toggles the
 * HyperTransport MSI mapping window, and initializes hotplug support
 * (pcie_init for PCIe fabrics, pcihp_init otherwise).
 * DDI_RESUME: restores the saved config registers.
 *
 * Fix: the pci_config_setup() failure path freed the soft state
 * without destroying the three mutexes initialized just above it; they
 * are now destroyed (in reverse order of creation) before teardown.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
/*ARGSUSED*/
static int
ppb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	dev_info_t *root = ddi_root_node();
	int instance;
	ppb_devstate_t *ppb;
	dev_info_t *pdip;
	ddi_acc_handle_t config_handle;
	char *bus;
	int ret;

	switch (cmd) {
	case DDI_ATTACH:

		/*
		 * Make sure the "device_type" property exists.
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
		    "device_type", "pci");

		/*
		 * Allocate and get soft state structure.
		 */
		instance = ddi_get_instance(devi);
		if (ddi_soft_state_zalloc(ppb_state, instance) != DDI_SUCCESS)
			return (DDI_FAILURE);
		ppb = ddi_get_soft_state(ppb_state, instance);
		ppb->dip = devi;

		/*
		 * don't enable ereports if immediate child of npe
		 */
		if (strcmp(ddi_driver_name(ddi_get_parent(devi)), "npe") == 0)
			ppb->ppb_fmcap = DDI_FM_ERRCB_CAPABLE |
			    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
		else
			ppb->ppb_fmcap = DDI_FM_EREPORT_CAPABLE |
			    DDI_FM_ERRCB_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
			    DDI_FM_DMACHK_CAPABLE;

		ddi_fm_init(devi, &ppb->ppb_fmcap, &ppb->ppb_fm_ibc);
		mutex_init(&ppb->ppb_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ppb->ppb_err_mutex, NULL, MUTEX_DRIVER,
		    (void *)ppb->ppb_fm_ibc);
		mutex_init(&ppb->ppb_peek_poke_mutex, NULL, MUTEX_DRIVER,
		    (void *)ppb->ppb_fm_ibc);

		if (ppb->ppb_fmcap & (DDI_FM_ERRCB_CAPABLE |
		    DDI_FM_EREPORT_CAPABLE))
			pci_ereport_setup(devi);
		if (ppb->ppb_fmcap & DDI_FM_ERRCB_CAPABLE)
			ddi_fm_handler_register(devi, ppb_fm_callback, NULL);

		if (pci_config_setup(devi, &config_handle) != DDI_SUCCESS) {
			if (ppb->ppb_fmcap & DDI_FM_ERRCB_CAPABLE)
				ddi_fm_handler_unregister(devi);
			if (ppb->ppb_fmcap & (DDI_FM_ERRCB_CAPABLE |
			    DDI_FM_EREPORT_CAPABLE))
				pci_ereport_teardown(devi);
			/* FIX: the mutexes above were leaked on this path */
			mutex_destroy(&ppb->ppb_peek_poke_mutex);
			mutex_destroy(&ppb->ppb_err_mutex);
			mutex_destroy(&ppb->ppb_mutex);
			ddi_fm_fini(devi);
			ddi_soft_state_free(ppb_state, instance);
			return (DDI_FAILURE);
		}

		/*
		 * Walk up the tree to determine whether this bridge
		 * hangs off a PCI Express fabric.
		 */
		ppb->parent_bus = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
		for (pdip = ddi_get_parent(devi); pdip && (pdip != root) &&
		    (ppb->parent_bus != PCIE_PCIECAP_DEV_TYPE_PCIE_DEV);
		    pdip = ddi_get_parent(pdip)) {
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
			    DDI_PROP_DONTPASS, "device_type", &bus)
			    != DDI_PROP_SUCCESS)
				break;

			if (strcmp(bus, "pciex") == 0)
				ppb->parent_bus =
				    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV;

			ddi_prop_free(bus);
		}

		/* Optionally force the HyperTransport MSI mapping window */
		if (ppb_support_ht_msimap == 1)
			(void) ppb_ht_msimap_set(config_handle,
			    HT_MSIMAP_ENABLE);
		else if (ppb_support_ht_msimap == -1)
			(void) ppb_ht_msimap_set(config_handle,
			    HT_MSIMAP_DISABLE);

		pci_config_teardown(&config_handle);

		/*
		 * Initialize hotplug support on this bus.
		 */
		if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV)
			ret = pcie_init(devi, NULL);
		else
			ret = pcihp_init(devi);

		if (ret != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "pci: Failed to setup hotplug framework");
			(void) ppb_detach(devi, DDI_DETACH);
			return (ret);
		}

		ddi_report_dev(devi);
		return (DDI_SUCCESS);

	case DDI_RESUME:
		/*
		 * Get the soft state structure for the bridge.
		 */
		ppb = ddi_get_soft_state(ppb_state, ddi_get_instance(devi));
		ppb_restore_config_regs(ppb);
		return (DDI_SUCCESS);

	default:
		break;
	}
	return (DDI_FAILURE);
}