/*
 * Handle the "cs" (set colorspace) operator while filtering content:
 * note the ColorSpace resource as used, remember the colorspace name in
 * the pending gstate, and reset the colour component count.
 */
static void
pdf_filter_cs(pdf_csi *csi, void *state_)
{
	pdf_filter_state *state = (pdf_filter_state *)state_;
	filter_gstate *gstate = gstate_to_update(csi, state);

	insert_resource(csi, state, "ColorSpace");

	/*
	 * Fix: bound the copy by the DESTINATION buffer.  The original
	 * passed sizeof(csi->name) (the source), which overflows
	 * gstate->cs if it is smaller than csi->name.
	 */
	fz_strlcpy(gstate->cs, csi->name, sizeof(gstate->cs));
	gstate->color_n = 0;
}
/*
 * Initialization. Try all known PCI access methods. Note that we support
 * using both PCI BIOS and direct access: in such cases, we use I/O ports
 * to access config space, but we still keep BIOS order of cards to be
 * compatible with 2.0.X. This should go away some day.
 */
static int __init pcibios_init(void)
{
	resource_size_t io_offset, mem_offset;
	LIST_HEAD(resources);
	struct pci_bus *bus;

	/* The whole PCI window sits at 0xA0000000-0xDFFFFFFF on this
	 * platform; clamp the global root resources accordingly. */
	ioport_resource.start = 0xA0000000;
	ioport_resource.end = 0xDFFFFFFF;
	iomem_resource.start = 0xA0000000;
	iomem_resource.end = 0xDFFFFFFF;

	/* Reserve the PCI sub-windows inside the global trees; failure
	 * here means the platform resource map is broken — fatal. */
	if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0)
		panic("Unable to insert PCI IOMEM resource\n");
	if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0)
		panic("Unable to insert PCI IOPORT resource\n");

	if (!pci_probe)
		return 0;	/* probing disabled (e.g. "pci=off") */

	if (pci_check_direct() < 0) {
		printk(KERN_WARNING "PCI: No PCI bus detected\n");
		return 0;
	}

	printk(KERN_INFO "PCI: Probing PCI hardware [mempage %08x]\n",
	       MEM_PAGING_REG);

	/*
	 * Compute the CPU-address to bus-address offsets: I/O ports are
	 * bus-addressed in the low 24 bits, memory in the low 26 bits
	 * combined with the current memory paging register.
	 */
	io_offset = pci_ioport_resource.start -
	    (pci_ioport_resource.start & 0x00ffffff);
	mem_offset = pci_iomem_resource.start -
	    ((pci_iomem_resource.start & 0x03ffffff) | MEM_PAGING_REG);

	pci_add_resource_offset(&resources, &pci_ioport_resource, io_offset);
	pci_add_resource_offset(&resources, &pci_iomem_resource, mem_offset);

	bus = pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, &resources);
	if (!bus)
		return 0;

	pcibios_resource_survey();
	pci_bus_add_devices(bus);
	return 0;
}
/*
 * Publish the local APIC's one-page register window in the iomem
 * resource tree (visible in /proc/iomem).  Returns -1 when the APIC
 * physical address has not been set up, 0 otherwise.
 */
static int __init lapic_insert_resource(void)
{
	if (!apic_phys)
		return -1;	/* no local APIC mapping to report */

	/* Put local APIC into the resource map. */
	lapic_resource.start = apic_phys;
	lapic_resource.end = apic_phys + PAGE_SIZE - 1;
	insert_resource(&iomem_resource, &lapic_resource);

	return 0;
}
/*
 * Register the local APIC register page with the iomem resource tree.
 * Returns -1 if the APIC physical address is unset, 0 otherwise.
 */
static int __init lapic_insert_resource(void)
{
	if (!apic_phys)
		return -1;

	/* One page of registers starting at the APIC physical base. */
	lapic_resource.start = apic_phys;
	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
	/* NOTE(review): insert_resource() failure is silently ignored. */
	insert_resource(&iomem_resource, &lapic_resource);
	return 0;
}
/*
 * Hash the URI held in the parser context, record it as a resource and
 * return its rid.  A missing or empty URI is logged as an error and
 * yields rid 0.
 */
static fs_rid insert_uri(xmlctxt *ctxt)
{
	char *uri = ctxt->resource;

	if (uri == NULL || *uri == '\0') {
		fs_error(LOG_ERR, "NULL URI inserted");
		return 0;
	}

	fs_rid rid = fs_hash_uri(uri);
	insert_resource(ctxt, rid, fs_c.empty, uri);

	return rid;
}
/*
 * Late MMCONFIG setup: parse the ACPI MCFG table, initialise the
 * architecture mapping, and claim each remaining MMCONFIG window in
 * the iomem resource tree.  Weak so an arch may override it.
 */
void __init __weak pci_mmcfg_late_init(void)
{
	struct pci_mmcfg_region *region;

	acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);

	if (list_empty(&pci_mmcfg_list))
		return;		/* no MMCONFIG regions found */

	/* Arch init failed: drop every region (list becomes empty). */
	if (!pci_mmcfg_arch_init())
		free_all_mmcfg();

	list_for_each_entry(region, &pci_mmcfg_list, list)
		insert_resource(&iomem_resource, &region->res);
}
/*
 * Build the <node_state> status section for one assembly under the
 * given <status> node, including an <lrm>/<lrm_resources> subtree
 * populated with the operation history of every resource that belongs
 * to the assembly.
 */
static void insert_status(xmlNode *status, struct assembly *assembly)
{
	struct operation_history *oh;
	xmlNode *resource_xml;
	xmlNode *resources_xml;
	xmlNode *lrm_xml;
	struct resource *resource;
	qb_map_iter_t *iter;

	qb_enter();

	qb_log(LOG_DEBUG, "Inserting assembly %s", assembly->name);

	/* Fixed node properties: identity plus always-online health. */
	xmlNode *node_state = xmlNewChild(status, NULL,
					  BAD_CAST "node_state", NULL);
	xmlNewProp(node_state, BAD_CAST "id", BAD_CAST assembly->uuid);
	xmlNewProp(node_state, BAD_CAST "uname", BAD_CAST assembly->name);
	xmlNewProp(node_state, BAD_CAST "ha", BAD_CAST "active");
	xmlNewProp(node_state, BAD_CAST "expected", BAD_CAST "member");
	xmlNewProp(node_state, BAD_CAST "in_ccm", BAD_CAST "true");
	xmlNewProp(node_state, BAD_CAST "crmd", BAD_CAST "online");

	/* check state: "join" reflects whether recovery is running */
	if (assembly->recover.state == RECOVER_STATE_RUNNING) {
		xmlNewProp(node_state, BAD_CAST "join", BAD_CAST "member");
		qb_log(LOG_DEBUG, "Assembly '%s' marked as member",
		       assembly->name);
	} else {
		xmlNewProp(node_state, BAD_CAST "join", BAD_CAST "pending");
		qb_log(LOG_DEBUG, "Assembly '%s' marked as pending",
		       assembly->name);
	}

	lrm_xml = xmlNewChild(node_state, NULL, BAD_CAST "lrm", NULL);
	resources_xml = xmlNewChild(lrm_xml, NULL,
				    BAD_CAST "lrm_resources", NULL);

	/*
	 * Walk the global operation-history map and emit entries whose
	 * resource belongs to this assembly.  NOTE(review): membership is
	 * a substring test on the resource name — presumably resource
	 * names embed the assembly name; verify against the callers.
	 */
	iter = qb_map_iter_create(op_history_map);
	while ((qb_map_iter_next(iter, (void **)&oh)) != NULL) {
		resource = oh->resource;
		if (strstr(resource->name, assembly->name) == NULL) {
			continue;
		}
		resource_xml = insert_resource(resources_xml, oh->resource);
		op_history_insert(resource_xml, oh);
	}
	qb_map_iter_free(iter);

	qb_leave();
}
/*
 * Insert the previously allocated IO-APIC register resources into the
 * iomem resource tree so they appear in /proc/iomem.  Returns -1 if the
 * resource array was never allocated, 0 otherwise.
 */
static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		/* Fixed garbled message ("could be not be allocated")
		 * and added a log level. */
		printk(KERN_ERR "IO APIC resources could not be allocated.\n");
		return -1;
	}

	/* One resource entry per IO-APIC, laid out contiguously. */
	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}
/*
 * Handle the "scn" (set colour, extended) operator while filtering:
 * note the Pattern resource when a name operand is present, remember
 * the name and the colour components in the pending gstate.
 */
static void
pdf_filter_scn(pdf_csi *csi, void *state_)
{
	pdf_filter_state *state = (pdf_filter_state *)state_;
	filter_gstate *gstate = gstate_to_update(csi, state);
	int i;

	if (csi->name[0])
		insert_resource(csi, state, "Pattern");

	/*
	 * Fix: bound the copy by the DESTINATION buffer.  The original
	 * passed sizeof(csi->name) (the source), which overflows
	 * gstate->cs_name if it is smaller than csi->name.
	 */
	fz_strlcpy(gstate->cs_name, csi->name, sizeof(gstate->cs_name));

	for (i = 0; i < csi->top; i++)
		gstate->color[i] = csi->stack[i];
	gstate->color_n = csi->top;
}
/*
 * ACPI resource-walk callback: convert one _CRS window of a PCI root
 * bridge into a struct pci_window and insert it into the matching
 * (ioport/iomem) resource tree.  Always returns AE_OK so the walk
 * continues even when a window is skipped or the insertion fails.
 */
static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	status = acpi_resource_to_address64(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;	/* not an address descriptor — skip */

	if (!addr.address_length)
		return AE_OK;	/* empty window — skip */

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.address_translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		/* add_io_space() maps the port window; ~0 means failure. */
		offset = add_io_space(&addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;	/* bus-number or other range — skip */

	/* Append a window to the controller and fill in the CPU-side
	 * (offset-translated) address range. */
	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
	window->resource.start = addr.min_address_range + offset;
	window->resource.end = addr.max_address_range + offset;
	window->resource.child = NULL;
	window->offset = offset;

	/* Insertion failure is logged but deliberately non-fatal. */
	if (insert_resource(root, &window->resource)) {
		printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
			window->resource.start, window->resource.end,
			root->name, info->name);
	}

	return AE_OK;
}
/*
 * Allocate a struct resource describing [start, end] with the given
 * name and flags and insert it under @root.  Returns 0 on success,
 * -ENOMEM on allocation failure, -EBUSY if the range conflicts with an
 * existing resource (the allocation is freed in that case).
 */
static int __devinit
alloc_resource(char *name, struct resource *root,
	       unsigned long start, unsigned long end, unsigned long flags)
{
	struct resource *res;

	/* kzalloc() replaces the kmalloc()+memset() pair. */
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (insert_resource(root, res)) {
		/* Fix: the original leaked 'res' on this path. */
		kfree(res);
		return -EBUSY;
	}

	return 0;
}
/*
 * Reserve, request and map the EBU (external bus unit) register window,
 * then clear the write-protect bit in BUSCON0.  Any failure is fatal.
 */
static int __init lantiq_ebu_init(void)
{
	/* Reserve the EBU register range in the iomem tree. */
	if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
		panic("Failed to insert ebu memory");

	/*
	 * Fix: request_mem_region() returns a pointer (NULL on failure);
	 * the old "< 0" comparison could never detect an error.
	 */
	if (!request_mem_region(ltq_ebu_resource.start,
				resource_size(&ltq_ebu_resource), "ebu"))
		panic("Failed to request ebu memory");

	/* Map the registers so ltq_ebu_w32()/ltq_ebu_r32() can reach them. */
	ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
					  resource_size(&ltq_ebu_resource));
	if (!ltq_ebu_membase)
		panic("Failed to remap ebu memory");

	/* Make sure writes to the external bus are enabled. */
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);

	return 0;
}
/*
 * Fix up the resources of a peer PCI root bus from the per-root info
 * read out of PCI config space — but only when the bus is still using
 * the default ioport/iomem roots (i.e. ACPI _CRS has not already
 * assigned proper windows) and more than one root bus exists.
 */
void x86_pci_root_bus_res_quirks(struct pci_bus *b)
{
	int i;
	int j;
	struct pci_root_info *info;

	/* don't go for it if _CRS is used already */
	if (b->resource[0] != &ioport_resource ||
	    b->resource[1] != &iomem_resource)
		return;

	/* if there is only one root bus there is nothing to fix up */
	if (pci_root_num < 2)
		return;

	/* Find the recorded root whose bus range starts at this bus. */
	for (i = 0; i < pci_root_num; i++) {
		if (pci_root_info[i].bus_min == b->number)
			break;
	}

	if (i == pci_root_num)
		return;		/* no info recorded for this bus */

	printk(KERN_DEBUG
	       "PCI: peer root bus %02x res updated from pci conf\n",
	       b->number);

	/* Point the bus at the recorded windows and claim each one in
	 * the matching global resource tree. */
	info = &pci_root_info[i];
	for (j = 0; j < info->res_num; j++) {
		struct resource *res;
		struct resource *root;

		res = &info->res[j];
		b->resource[j] = res;
		if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			root = &iomem_resource;
		/* NOTE(review): insert_resource() failure is ignored. */
		insert_resource(root, res);
	}
}
/*
 * Reserve, request and map the RCU (reset control unit) register window
 * and install the machine restart/halt/power-off hooks.  Any mapping
 * failure is fatal.
 */
static int __init mips_reboot_setup(void)
{
	/* insert and request the memory region */
	if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0)
		panic("Failed to insert rcu memory");

	/*
	 * Fix: request_mem_region() returns a pointer (NULL on failure);
	 * the old "< 0" comparison could never detect an error.
	 */
	if (!request_mem_region(ltq_rcu_resource.start,
				resource_size(&ltq_rcu_resource), "rcu"))
		panic("Failed to request rcu memory");

	/* remap rcu register range */
	ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start,
					  resource_size(&ltq_rcu_resource));
	if (!ltq_rcu_membase)
		panic("Failed to remap rcu memory");

	_machine_restart = ltq_machine_restart;
	_machine_halt = ltq_machine_halt;
	pm_power_off = ltq_machine_power_off;

	return 0;
}
/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	char *msg;
	int rc;

	/* Parse "crashkernel=" from the command line; nothing to do if
	 * it is absent or requests a zero-sized area. */
	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);
	if (rc || crash_size == 0)
		return;

	/* Both base and size must be crash-memory aligned. */
	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);

	/* The memory notifier keeps the reserved range consistent with
	 * later memory hotplug events; bail out if it can't register. */
	if (register_memory_notifier(&kdump_mem_nb))
		return;

	/* No explicit base given on the command line: find one. */
	if (!crash_base)
		crash_base = find_crash_base(crash_size, &msg);
	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n", msg);
		unregister_memory_notifier(&kdump_mem_nb);
		return;
	}
	if (verify_crash_base(crash_base, crash_size, &msg)) {
		pr_info("crashkernel reservation failed: %s\n", msg);
		unregister_memory_notifier(&kdump_mem_nb);
		return;
	}

	/* NOTE(review): under z/VM (and when not already running as the
	 * kdump kernel) the range is passed to diag 0x10 — presumably to
	 * release the pages to the hypervisor; confirm semantics. */
	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));

	/* Publish the reservation and withdraw it from bootmem. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
	reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20, memory_end >> 20);
#endif
}
/*
 * Insert (if not already present), request and remap an iomem resource.
 * Returns the mapped base on success.  Every failure panics; the
 * cleanup is ordered BEFORE each panic so it actually runs (in the
 * original, cleanup code sat after panic() and was unreachable).
 */
void __iomem *ltq_remap_resource(struct resource *res)
{
	__iomem void *ret;
	struct resource *lookup = lookup_resource(&iomem_resource, res->start);

	/* An existing range under a different name is a real conflict. */
	if (lookup && strcmp(lookup->name, res->name)) {
		panic("conflicting memory range %s\n", res->name);
		return NULL;
	}

	if (!lookup && insert_resource(&iomem_resource, res) < 0) {
		panic("Failed to insert %s memory\n", res->name);
		return NULL;
	}

	/*
	 * Fix: request_mem_region() returns a pointer (NULL on failure);
	 * the old "< 0" comparison could never detect an error.
	 */
	if (!request_mem_region(res->start, resource_size(res), res->name)) {
		release_resource(res);
		panic("Failed to request %s memory\n", res->name);
		return NULL;
	}

	ret = ioremap_nocache(res->start, resource_size(res));
	if (!ret) {
		/* Undo in reverse order, then panic. */
		release_mem_region(res->start, resource_size(res));
		release_resource(res);
		panic("Failed to remap %s memory\n", res->name);
		return NULL;
	}

	pr_debug("remap: 0x%08X-0x%08X : \"%s\"\n",
		 res->start, res->end, res->name);
	return ret;
}
/*
 * ZHT test client.  Options:
 *   -n <neighbor file>  -z <zht config>  -p <TCP|UDP>
 *   -i  run insert test   -l  run lookup test   (default: compare-swap)
 *   -h  print usage
 * All three of -n/-z/-p are required; otherwise usage is printed and
 * the process exits with status 1.
 */
int main(int argc, char **argv)
{
	CUR_PID = getpid();

	extern char *optarg;
	int printHelp = 0;
	char *neighbor = NULL;
	char *zht_cfg = NULL;
	char *protocol = NULL;
	int is_init = 0;
	int is_lookup = 0;
	int c;
	/* (removed unused local "double us") */

	while ((c = getopt(argc, argv, "n:z:p:ilh")) != -1) {
		switch (c) {
		case 'n':
			neighbor = optarg;
			break;
		case 'z':
			zht_cfg = optarg;
			break;
		case 'p':
			protocol = optarg;
			break;
		case 'i':
			is_init = 1;
			break;
		case 'l':
			is_lookup = 1;
			break;
		case 'h':
			printHelp = 1;
			break;
		default:
			fprintf(stdout, "Illegal argument \"%c\"\n", c);
			printUsage(argv[0]);
			exit(1);
		}
	}

	if (printHelp) {
		printUsage(argv[0]);
		exit(1);
	}

	/* Guard clause: all three connection parameters are mandatory. */
	if (neighbor == NULL || zht_cfg == NULL || protocol == NULL) {
		printUsage(argv[0]);
		exit(1);
	}

	/* TCP when the protocol string is exactly "TCP", UDP otherwise. */
	bool useTCP = (strcmp("TCP", protocol) == 0);

	/*init...*/
	c_zht_init(neighbor, zht_cfg, useTCP); //neighbor zht.cfg TCP

	if (is_init)
		insert_resource();
	else if (is_lookup)
		lookup_nodes();
	else
		test_compare_swap();

	/*clear...*/
	c_zht_teardown();

	return 0;
}
/*
 * Map the ICU and EIU register windows, mask and acknowledge all
 * interrupt modules, then wire up the MIPS CPU IRQ cascade, vectored
 * interrupt handlers (when available) and the per-IRQ chip/handlers.
 */
void __init arch_init_irq(void)
{
	int i;

	if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
		panic("Failed to insert icu memory");

	/*
	 * Fix: request_mem_region() returns a pointer (NULL on failure);
	 * the old "< 0" comparison could never detect an error.
	 */
	if (!request_mem_region(ltq_icu_resource.start,
				resource_size(&ltq_icu_resource), "icu"))
		panic("Failed to request icu memory");

	ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
					  resource_size(&ltq_icu_resource));
	if (!ltq_icu_membase)
		panic("Failed to remap icu memory");

	if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
		panic("Failed to insert eiu memory");

	if (!request_mem_region(ltq_eiu_resource.start,
				resource_size(&ltq_eiu_resource), "eiu"))
		panic("Failed to request eiu memory");

	ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
					  resource_size(&ltq_eiu_resource));
	if (!ltq_eiu_membase)
		panic("Failed to remap eiu memory");

	/*
	 * Fix: both writes belong inside the loop — mask (IER=0) and
	 * acknowledge (ISR=~0) every interrupt module.  Without braces
	 * the ISR write ran once, after the loop, with i == 5 (past the
	 * last module).
	 */
	for (i = 0; i < 5; i++) {
		ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
		ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
	}

	mips_cpu_irq_init();

	for (i = 2; i <= 6; i++)
		setup_irq(i, &cascade);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

	/* EIU lines get the EIU chip; IR3-IR5 only on AR9/VR9 SoCs. */
	for (i = INT_NUM_IRQ0;
	     i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
		if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
		    (i == LTQ_EIU_IR2))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
						 handle_level_irq);
		else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
			  (i == LTQ_EIU_IR5)) &&
			 (ltq_is_ar9() || ltq_is_vr9()))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
						 handle_level_irq);
		else
			irq_set_chip_and_handler(i, &ltq_irq_type,
						 handle_level_irq);

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 |
		      IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		      IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif
}
/**
 * platform_device_add - add a platform device to device hierarchy
 * @pdev: platform device we're adding
 *
 * This is part 2 of platform_device_register(), though may be called
 * separately _iff_ pdev was allocated by platform_device_alloc().
 */
int platform_device_add(struct platform_device *pdev)
{
	int i, ret;

	if (!pdev)
		return -EINVAL;

	/* Devices without an explicit parent hang off the platform bus. */
	if (!pdev->dev.parent)
		pdev->dev.parent = &platform_bus;

	pdev->dev.bus = &platform_bus_type;

	/* Device name depends on the id convention used. */
	switch (pdev->id) {
	default:
		dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
		break;
	case PLATFORM_DEVID_NONE:
		dev_set_name(&pdev->dev, "%s", pdev->name);
		break;
	case PLATFORM_DEVID_AUTO:
		/*
		 * Automatically allocated device ID. We mark it as such so
		 * that we remember it must be freed, and we append a suffix
		 * to avoid namespace collision with explicit IDs.
		 */
		ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_out;
		pdev->id = ret;
		pdev->id_auto = true;
		dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
		break;
	}

	/* Claim each resource under its parent tree — explicit parent if
	 * set, otherwise the global iomem/ioport root matching its type. */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *p, *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);

		p = r->parent;
		if (!p) {
			if (resource_type(r) == IORESOURCE_MEM)
				p = &iomem_resource;
			else if (resource_type(r) == IORESOURCE_IO)
				p = &ioport_resource;
		}

		if (p) {
			ret = insert_resource(p, r);
			if (ret) {
				dev_err(&pdev->dev,
					"failed to claim resource %d: %pR\n",
					i, r);
				goto failed;
			}
		}
	}

	pr_debug("Registering platform device '%s'. Parent at %s\n",
		 dev_name(&pdev->dev), dev_name(pdev->dev.parent));

	ret = device_add(&pdev->dev);
	if (ret == 0)
		return ret;

 failed:
	/* Roll back: free an auto-allocated id ... */
	if (pdev->id_auto) {
		ida_simple_remove(&platform_devid_ida, pdev->id);
		pdev->id = PLATFORM_DEVID_AUTO;
	}

	/* ... and release the resources claimed so far; i is the index
	 * of the resource that failed, so count down from there. */
	while (--i >= 0) {
		struct resource *r = &pdev->resource[i];
		if (r->parent)
			release_resource(r);
	}

 err_out:
	return ret;
}
/* Reserve the GART aperture range in the iomem resource tree. */
static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
{
	/* Resource ranges are inclusive, hence the -1 on the end. */
	u32 aper_last = aper_base + aper_size - 1;

	gart_resource.start = aper_base;
	gart_resource.end = aper_last;
	insert_resource(&iomem_resource, &gart_resource);
}