static int dt_cpufreq_probe(struct platform_device *pdev) { struct device *cpu_dev; struct regulator *cpu_reg; struct clk *cpu_clk; int ret; /* * All per-cluster (CPUs sharing clock/voltages) initialization is done * from ->init(). In probe(), we just need to make sure that clk and * regulators are available. Else defer probe and retry. * * FIXME: Is checking this only for CPU0 sufficient ? */ ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk); if (ret) return ret; clk_put(cpu_clk); if (!IS_ERR(cpu_reg)) regulator_put(cpu_reg); dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev); ret = cpufreq_register_driver(&dt_cpufreq_driver); if (ret) dev_err(cpu_dev, "failed register driver: %d\n", ret); return ret; }
/**
 * Configure (or reconfigure) the GStreamer video encoder state.
 *
 * @param stp   Pointer to encoder state pointer; allocated on first call
 * @param vc    Video codec description (name used for logging)
 * @param prm   Encoder parameters (bitrate, packet size, fps)
 * @param fmtp  Optional SDP format parameters to apply
 * @param pkth  Packet handler callback (stored on first allocation)
 * @param arg   Handler argument (stored on first allocation)
 *
 * @return 0 on success, otherwise errno-style error code
 *
 * Fix: the original read `*stp` before the NULL check of @stp, which is a
 * NULL-pointer dereference when called with stp == NULL. The dereference
 * now happens only after argument validation.
 */
int gst_video1_encoder_set(struct videnc_state **stp,
			   const struct vidcodec *vc,
			   struct videnc_param *prm, const char *fmtp,
			   videnc_packet_h *pkth, void *arg)
{
	struct videnc_state *st;
	int err = 0;

	if (!stp || !vc || !prm || !pkth)
		return EINVAL;

	st = *stp;

	if (!st) {
		/* First use: allocate state and remember the callback */
		err = allocate_resources(stp);
		if (err) {
			warning("gst_video: resource allocation failed\n");
			return err;
		}
		st = *stp;
		st->pkth = pkth;
		st->arg = arg;
	}
	else {
		if (!st->streamer.valid) {
			warning("gst_video codec: trying to work"
				" with invalid pipeline\n");
			return EINVAL;
		}

		/* Parameter change requires tearing down the pipeline so it
		 * can be rebuilt with the new settings. */
		if ((st->encoder.bitrate != prm->bitrate ||
		     st->encoder.pktsize != prm->pktsize ||
		     st->encoder.fps != prm->fps)) {

			pipeline_close(st);
		}
	}

	st->encoder.bitrate = prm->bitrate;
	st->encoder.pktsize = prm->pktsize;
	st->encoder.fps = prm->fps;

	if (str_isset(fmtp)) {
		struct pl sdp_fmtp;

		pl_set_str(&sdp_fmtp, fmtp);

		/* store new parameters */
		fmt_param_apply(&sdp_fmtp, param_handler, st);
	}

	info("gst_video: video encoder %s: %d fps, %d bit/s, pktsize=%u\n",
	     vc->name, st->encoder.fps, st->encoder.bitrate,
	     st->encoder.pktsize);

	return err;
}
// Build all meshes, record one indirect-draw command per mesh, and upload
// the combined vertex/index data into a single mapped device allocation.
Meshes::Meshes(VkDevice dev, const std::vector<VkMemoryPropertyFlags> &mem_flags)
    : dev_(dev),
      vertex_input_binding_(Mesh::vertex_input_binding()),
      vertex_input_attrs_(Mesh::vertex_input_attributes()),
      vertex_input_state_(),
      input_assembly_state_(Mesh::input_assembly_state()),
      index_type_(Mesh::index_type())
{
    // All meshes share one vertex binding; attribute layout comes from Mesh.
    vertex_input_state_.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    vertex_input_state_.vertexBindingDescriptionCount = 1;
    vertex_input_state_.pVertexBindingDescriptions = &vertex_input_binding_;
    vertex_input_state_.vertexAttributeDescriptionCount =
        static_cast<uint32_t>(vertex_input_attrs_.size());
    vertex_input_state_.pVertexAttributeDescriptions = vertex_input_attrs_.data();

    std::array<Mesh, MESH_COUNT> meshes;
    build_meshes(meshes);

    draw_commands_.reserve(meshes.size());
    uint32_t first_index = 0;
    int32_t vertex_offset = 0;
    VkDeviceSize vb_size = 0;
    VkDeviceSize ib_size = 0;
    // First pass: accumulate per-mesh draw parameters and total buffer sizes.
    for (const auto &mesh : meshes) {
        VkDrawIndexedIndirectCommand draw = {};
        draw.indexCount = mesh.index_count();
        draw.instanceCount = 1;
        draw.firstIndex = first_index;
        draw.vertexOffset = vertex_offset;
        draw.firstInstance = 0;
        draw_commands_.push_back(draw);

        first_index += mesh.index_count();
        vertex_offset += mesh.vertex_count();
        vb_size += mesh.vertex_buffer_size();
        ib_size += mesh.index_buffer_size();
    }

    // Creates the buffers and backing memory; sets mem_ and ib_mem_offset_
    // (index data lives at ib_mem_offset_ within the same allocation).
    allocate_resources(vb_size, ib_size, mem_flags);

    uint8_t *vb_data, *ib_data;
    vk::assert_success(vk::MapMemory(dev_, mem_, 0, VK_WHOLE_SIZE,
                                     0, reinterpret_cast<void **>(&vb_data)));
    ib_data = vb_data + ib_mem_offset_;

    // Second pass: write each mesh's data, advancing both cursors so the
    // packed layout matches the offsets recorded in draw_commands_.
    for (const auto &mesh : meshes) {
        mesh.vertex_buffer_write(vb_data);
        mesh.index_buffer_write(ib_data);
        vb_data += mesh.vertex_buffer_size();
        ib_data += mesh.index_buffer_size();
    }

    vk::UnmapMemory(dev_, mem_);
}
/*
 * Thread entry point that boots the ntripcaster server.
 *
 * Runs the full startup sequence (paths, threading, defaults, signals,
 * resources, auth, config, network) and then enters startup_mode(), which
 * is expected to run the server loop. Terminates the thread afterwards.
 *
 * Fix: the final "ntripcaster ended" log was placed AFTER pthread_exit(),
 * which never returns, so the message could never be emitted. It is now
 * logged before exiting, and an unreachable return satisfies the void *
 * signature for compilers that warn about a missing return.
 */
void *_serverstart()
{
	android_log(ANDROID_LOG_VERBOSE,
		    "ntripcaster status before starting:%d",
		    get_ntripcaster_state());
	set_run_path(appPath);
	thread_lib_init ();
	init_thread_tree (__LINE__, __FILE__);
	setup_defaults ();
	setup_signal_traps ();
	allocate_resources ();
	init_authentication_scheme ();
	parse_default_config_file ();
	initialize_network ();
	startup_mode ();
	android_log(ANDROID_LOG_VERBOSE,
		    "ntripcaster status after ending:%d",
		    get_ntripcaster_state());
	android_log(ANDROID_LOG_VERBOSE, "ntripcaster ended");
	pthread_exit(0);
	return NULL; /* not reached; pthread_exit() does not return */
}
/*
 * Dispatch a single simulator instruction for @proc.
 *
 * token_arr[0] selects the opcode:
 *   "RQ"  - request resources: fills request_vector from tokens 1..NRES,
 *           then attempts allocation.
 *   "RL"  - release resources: fills release_vector, then deallocates.
 *   "SL"  - sleep: re-enqueue the process on the ready queue.
 *   "END" - terminate: deallocate everything (TRUE = final release).
 *
 * Returns the allocate/deallocate result, or 1 for "SL" (see prototype
 * header). Exits the program on an unrecognized instruction.
 *
 * Fix: the invalid-instruction path used perror(), which appends an
 * unrelated strerror(errno) message to a diagnostic that has nothing to
 * do with errno; fprintf(stderr, ...) prints exactly the intended text.
 */
static int process_instruction(process *proc, queue *q_ready,
			       queue *q_process, queue *q_wait,
			       char *token_arr[])
{
	matrix_t i;

	if (!strcmp(token_arr[0], "RQ")) {
		/* process a request for allocation of resources */
		for (i = 0; i < NRES; i++) {
			proc->request_vector[i] = atoi(token_arr[i+1]);
		}
		return allocate_resources(proc, q_ready, q_process, q_wait);
	} else if (!strcmp(token_arr[0], "RL")) {
		/* process a request to release resources */
		for (i = 0; i < NRES; i++) {
			proc->release_vector[i] = atoi(token_arr[i+1]);
		}
		return deallocate_resources(proc, q_ready, q_process, q_wait,
					    FALSE);
	} else if (!strcmp(token_arr[0], "SL")) {
		/* put this process to sleep */
		enqueue(q_ready, proc);
		return 1; /* note the 1 return value (prototype header for
			     more information */
	} else if (!strcmp(token_arr[0], "END")) {
		/* terminate this process successfully */
		return deallocate_resources(proc, q_ready, q_process, q_wait,
					    TRUE);
	} else {
		/* an invalid instruction was found */
		fprintf(stderr, "Invalid instruction detected.\nExiting...\n");
		exit(EXIT_FAILURE);
	}
}
static int cpufreq_init(struct cpufreq_policy *policy) { struct cpufreq_dt_platform_data *pd; struct cpufreq_frequency_table *freq_table; struct thermal_cooling_device *cdev; struct device_node *np; struct private_data *priv; struct device *cpu_dev; struct regulator *cpu_reg; struct clk *cpu_clk; unsigned long min_uV = ~0, max_uV = 0; unsigned int transition_latency; int ret; ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); if (ret) { pr_err("%s: Failed to allocate resources\n: %d", __func__, ret); return ret; } np = of_node_get(cpu_dev->of_node); if (!np) { dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu); ret = -ENOENT; goto out_put_reg_clk; } /* OPPs might be populated at runtime, don't check for error here */ of_init_opp_table(cpu_dev); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; goto out_put_node; } of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); if (of_property_read_u32(np, "clock-latency", &transition_latency)) transition_latency = CPUFREQ_ETERNAL; if (!IS_ERR(cpu_reg)) { unsigned long opp_freq = 0; /* * Disable any OPPs where the connected regulator isn't able to * provide the specified voltage and record minimum and maximum * voltage levels. 
*/ while (1) { struct dev_pm_opp *opp; unsigned long opp_uV, tol_uV; rcu_read_lock(); opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq); if (IS_ERR(opp)) { rcu_read_unlock(); break; } opp_uV = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); tol_uV = opp_uV * priv->voltage_tolerance / 100; if (regulator_is_supported_voltage(cpu_reg, opp_uV, opp_uV + tol_uV)) { if (opp_uV < min_uV) min_uV = opp_uV; if (opp_uV > max_uV) max_uV = opp_uV; } else { dev_pm_opp_disable(cpu_dev, opp_freq); } opp_freq++; } ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); if (ret > 0) transition_latency += ret * 1000; } ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { pr_err("failed to init cpufreq table: %d\n", ret); goto out_free_priv; } /* * For now, just loading the cooling device; * thermal DT code takes care of matching them. */ if (of_find_property(np, "#cooling-cells", NULL)) { cdev = of_cpufreq_cooling_register(np, cpu_present_mask); if (IS_ERR(cdev)) dev_err(cpu_dev, "running cpufreq without cooling device: %ld\n", PTR_ERR(cdev)); else priv->cdev = cdev; } priv->cpu_dev = cpu_dev; priv->cpu_reg = cpu_reg; policy->driver_data = priv; policy->clk = cpu_clk; ret = cpufreq_table_validate_and_show(policy, freq_table); if (ret) { dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, ret); goto out_cooling_unregister; } policy->cpuinfo.transition_latency = transition_latency; pd = cpufreq_get_driver_data(); if (!pd || !pd->independent_clocks) cpumask_setall(policy->cpus); of_node_put(np); return 0; out_cooling_unregister: cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); out_free_priv: kfree(priv); out_put_node: of_node_put(np); out_put_reg_clk: clk_put(cpu_clk); if (!IS_ERR(cpu_reg)) regulator_put(cpu_reg); return ret; }
/**
 * Configure devices on the devices tree.
 *
 * Starting at the root of the device tree, travel it recursively in two
 * passes. In the first pass, we compute and allocate resources (ranges)
 * required by each device. In the second pass, the resources ranges are
 * relocated to their final position and stored to the hardware.
 *
 * I/O resources grow upward. MEM resources grow downward.
 *
 * Since the assignment is hierarchical we set the values into the dev_root
 * struct.
 */
void dev_configure(void)
{
	struct resource *res;
	struct device *root;
	struct device *child;

	set_vga_bridge_bits();

	printk(BIOS_INFO, "Allocating resources...\n");

	root = &dev_root;

	/*
	 * Each domain should create resources which contain the entire address
	 * space for IO, MEM, and PREFMEM resources in the domain. The
	 * allocation of device resources will be done from this address space.
	 */

	/* Read the resources for the entire tree. */

	printk(BIOS_INFO, "Reading resources...\n");
	read_resources(root->link_list);
	printk(BIOS_INFO, "Done reading resources.\n");

	print_resource_tree(root, BIOS_SPEW, "After reading.");

	/* Compute resources for all domains. */
	for (child = root->link_list->children; child; child = child->sibling) {
		if (!(child->path.type == DEVICE_PATH_DOMAIN))
			continue;
		/* Route each domain-level window to the matching compute
		 * pass: prefetchable MEM, plain MEM, then I/O. Fixed
		 * resources are already placed and are skipped. */
		for (res = child->resource_list; res; res = res->next) {
			if (res->flags & IORESOURCE_FIXED)
				continue;
			if (res->flags & IORESOURCE_PREFETCH) {
				compute_resources(child->link_list,
						  res, MEM_MASK, PREF_TYPE);
				continue;
			}
			if (res->flags & IORESOURCE_MEM) {
				compute_resources(child->link_list,
						  res, MEM_MASK, MEM_TYPE);
				continue;
			}
			if (res->flags & IORESOURCE_IO) {
				compute_resources(child->link_list,
						  res, IO_MASK, IO_TYPE);
				continue;
			}
		}
	}

	/* For all domains. */
	for (child = root->link_list->children; child; child=child->sibling)
		if (child->path.type == DEVICE_PATH_DOMAIN)
			avoid_fixed_resources(child);

	/*
	 * Now we need to adjust the resources. MEM resources need to start at
	 * the highest address manageable.
	 */
	for (child = root->link_list->children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_DOMAIN)
			continue;
		for (res = child->resource_list; res; res = res->next) {
			if (!(res->flags & IORESOURCE_MEM) ||
			    res->flags & IORESOURCE_FIXED)
				continue;
			res->base = resource_max(res);
		}
	}

	/* Store the computed resource allocations into device registers ... */
	printk(BIOS_INFO, "Setting resources...\n");
	for (child = root->link_list->children; child; child = child->sibling) {
		if (!(child->path.type == DEVICE_PATH_DOMAIN))
			continue;
		/* Second pass mirrors the compute pass above, but now
		 * assigns final addresses via allocate_resources(). */
		for (res = child->resource_list; res; res = res->next) {
			if (res->flags & IORESOURCE_FIXED)
				continue;
			if (res->flags & IORESOURCE_PREFETCH) {
				allocate_resources(child->link_list,
						   res, MEM_MASK, PREF_TYPE);
				continue;
			}
			if (res->flags & IORESOURCE_MEM) {
				allocate_resources(child->link_list,
						   res, MEM_MASK, MEM_TYPE);
				continue;
			}
			if (res->flags & IORESOURCE_IO) {
				allocate_resources(child->link_list,
						   res, IO_MASK, IO_TYPE);
				continue;
			}
		}
	}
	assign_resources(root->link_list);
	printk(BIOS_INFO, "Done setting resources.\n");
	print_resource_tree(root, BIOS_SPEW, "After assigning values.");

	printk(BIOS_INFO, "Done allocating resources.\n");
}
/**
 * This function is the second part of the resource allocator.
 *
 * See the compute_resources function for a more detailed explanation.
 *
 * This function assigns the resources a value.
 *
 * @param bus The bus we are traversing.
 * @param bridge The bridge resource which must contain the bus' resources.
 * @param type_mask This value gets ANDed with the resource type.
 * @param type This value must match the result of the AND.
 *
 * @see compute_resources
 */
static void allocate_resources(struct bus *bus, struct resource *bridge,
			       unsigned long type_mask, unsigned long type)
{
	struct device *dev;
	struct resource *resource;
	resource_t base;

	/* Allocation proceeds upward from the bridge window's base. */
	base = bridge->base;

	printk(BIOS_SPEW, "%s %s_%s: base:%llx size:%llx align:%d gran:%d "
	       "limit:%llx\n", dev_path(bus->dev), __func__,
	       (type & IORESOURCE_IO) ? "io" :
	       (type & IORESOURCE_PREFETCH) ? "prefmem" : "mem",
	       base, bridge->size, bridge->align, bridge->gran, bridge->limit);

	/* Remember we haven't found anything yet. */
	resource = NULL;

	/*
	 * Walk through all the resources on the current bus and allocate them
	 * address space.
	 */
	while ((dev = largest_resource(bus, &resource, type_mask, type))) {

		/* Propagate the bridge limit to the resource register. */
		if (resource->limit > bridge->limit)
			resource->limit = bridge->limit;

		/* Size 0 resources can be skipped. */
		if (!resource->size) {
			/* Set the base to limit so it doesn't confuse tolm. */
			resource->base = resource->limit;
			resource->flags |= IORESOURCE_ASSIGNED;
			continue;
		}

		if (resource->flags & IORESOURCE_IO) {
			/*
			 * Don't allow potential aliases over the legacy PCI
			 * expansion card addresses. The legacy PCI decodes
			 * only 10 bits, uses 0x100 - 0x3ff. Therefore, only
			 * 0x00 - 0xff can be used out of each 0x400 block of
			 * I/O space.
			 */
			if ((base & 0x300) != 0) {
				base = (base & ~0x3ff) + 0x400;
			}
			/*
			 * Don't allow allocations in the VGA I/O range.
			 * PCI has special cases for that.
			 */
			else if ((base >= 0x3b0) && (base <= 0x3df)) {
				base = 0x3e0;
			}
		}

		/* Place the resource if its aligned span fits below the
		 * window limit; otherwise report the failure in detail. */
		if ((round(base, resource->align) + resource->size - 1) <=
		    resource->limit) {
			/* Base must be aligned. */
			base = round(base, resource->align);
			resource->base = base;
			resource->flags |= IORESOURCE_ASSIGNED;
			resource->flags &= ~IORESOURCE_STORED;
			base += resource->size;
		} else {
			printk(BIOS_ERR, "!! Resource didn't fit !!\n");
			printk(BIOS_ERR, " aligned base %llx size %llx "
			       "limit %llx\n", round(base, resource->align),
			       resource->size, resource->limit);
			printk(BIOS_ERR, " %llx needs to be <= %llx "
			       "(limit)\n", (round(base, resource->align) +
			       resource->size) - 1, resource->limit);
			printk(BIOS_ERR, " %s%s %02lx * [0x%llx - 0x%llx]"
			       " %s\n",
			       (resource->flags & IORESOURCE_ASSIGNED)
			       ? "Assigned: " : "",
			       dev_path(dev), resource->index, resource->base,
			       resource->base + resource->size - 1,
			       (resource->flags & IORESOURCE_IO) ? "io" :
			       (resource->flags & IORESOURCE_PREFETCH)
			       ? "prefmem" : "mem");
		}

		printk(BIOS_SPEW, "%s%s %02lx * [0x%llx - 0x%llx] %s\n",
		       (resource->flags & IORESOURCE_ASSIGNED)
		       ? "Assigned: " : "",
		       dev_path(dev), resource->index, resource->base,
		       resource->size ?
		       resource->base + resource->size - 1 : resource->base,
		       (resource->flags & IORESOURCE_IO) ? "io" :
		       (resource->flags & IORESOURCE_PREFETCH)
		       ? "prefmem" : "mem");
	}

	/*
	 * A PCI bridge resource does not need to be a power of two size, but
	 * it does have a minimum granularity. Round the size up to that
	 * minimum granularity so we know not to place something else at an
	 * address positively decoded by the bridge.
	 */

	bridge->flags |= IORESOURCE_ASSIGNED;

	printk(BIOS_SPEW, "%s %s_%s: next_base: %llx size: %llx align: %d "
	       "gran: %d done\n", dev_path(bus->dev), __func__,
	       (type & IORESOURCE_IO) ? "io" :
	       (type & IORESOURCE_PREFETCH) ? "prefmem" : "mem",
	       base, bridge->size, bridge->align, bridge->gran);

	/* For each child which is a bridge, allocate_resources. */
	for (dev = bus->children; dev; dev = dev->sibling) {
		struct resource *child_bridge;

		if (!dev->link_list)
			continue;

		/* Find the resources with matching type flags. */
		for (child_bridge = dev->resource_list; child_bridge;
		     child_bridge = child_bridge->next) {
			struct bus* link;

			if (!(child_bridge->flags & IORESOURCE_BRIDGE) ||
			    (child_bridge->flags & type_mask) != type)
				continue;

			/*
			 * Split prefetchable memory if combined. Many domains
			 * use the same address space for prefetchable memory
			 * and non-prefetchable memory. Bridges below them need
			 * it separated. Add the PREFETCH flag to the type_mask
			 * and type.
			 */
			link = dev->link_list;
			while (link && link->link_num !=
			       IOINDEX_LINK(child_bridge->index))
				link = link->next;
			if (link == NULL)
				printk(BIOS_ERR, "link %ld not found on %s\n",
				       IOINDEX_LINK(child_bridge->index),
				       dev_path(dev));

			allocate_resources(link, child_bridge,
					   type_mask | IORESOURCE_PREFETCH,
					   type | (child_bridge->flags &
						   IORESOURCE_PREFETCH));
		}
	}
}
/*
 * Driver open hook for the dp83815 NIC.
 *
 * Validates the device name, guards against concurrent opens via
 * m_openmask, allocates the per-device cookie, sets up semaphores, PCI
 * access, DMA resources, ring buffers and interrupts, and finally enables
 * the receiver.
 *
 * Fix: on install_io_interrupt_handler() failure the function returned
 * B_ERROR directly, leaking both the resources acquired by
 * allocate_resources() and the cookie itself; that path now goes through
 * the common err1 cleanup like every later failure.
 */
static status_t open_hook(const char *name, uint32 flags, void** cookie)
{
	dp83815_properties_t *data;
	uint8 temp8;
	// uint16 temp16;
	uint32 temp32;
	unsigned char cmd;

	TRACE(( kDevName " open_hook()\n" ));

	// verify device access
	{
		char *thisName;
		int32 mask;

		// search for device name
		for (temp8 = 0; (thisName = dp83815_names[temp8]) != NULL; temp8++) {
			if (!strcmp(name, thisName))
				break;
		}
		if (!thisName)
			return EINVAL;

		// check if device is already open
		mask = 1L << temp8;
		if (atomic_or(&m_openmask, mask) & mask)
			return B_BUSY;
	}

	//Create a structure that contains the internals
	if (!(*cookie = data = (dp83815_properties_t *)malloc(sizeof(dp83815_properties_t)))) {
		TRACE(( kDevName " open_hook(): Out of memory\n" ));
		return B_NO_MEMORY;
	}

	// NOTE(review): this CLEARS the busy bit that atomic_or() just set,
	// despite the comment claiming to mark the device open — verify
	// against close_hook()/free_hook(); kept as-is to preserve behavior.
	//Set status to open:
	m_openmask &= ~( 1L << temp8 );

	//Clear memory
	memset( data , 0 , sizeof( dp83815_properties_t ) );

	//Set the ID
	data->device_id = temp8;

	// Create lock
	data->lock = create_sem( 1 , kDevName " data protect" );
	set_sem_owner( data->lock , B_SYSTEM_TEAM );
	data->Rx.Sem = create_sem( 0 , kDevName " read wait" );
	set_sem_owner( data->Rx.Sem , B_SYSTEM_TEAM );
	data->Tx.Sem = create_sem( 1 , kDevName " write wait" );
	set_sem_owner( data->Tx.Sem , B_SYSTEM_TEAM );

	//Set up the cookie
	data->pcii = m_devices[data->device_id];

	//Enable the registers
	dp83815_init_registers( data );

	/* enable pci address access */
	cmd = m_pcimodule->read_pci_config(data->pcii->bus, data->pcii->device,
					   data->pcii->function, PCI_command, 2);
	cmd = cmd | PCI_command_io | PCI_command_master | PCI_command_memory;
	m_pcimodule->write_pci_config(data->pcii->bus, data->pcii->device,
				      data->pcii->function, PCI_command, 2, cmd );

	if (allocate_resources(data) != B_OK)
		goto err1;

	/* We want interrupts! */
	if ( install_io_interrupt_handler( data->pcii->u.h0.interrupt_line,
					   dp83815_interrupt_hook, data, 0 ) != B_OK ) {
		TRACE(( kDevName " open_hook(): Error installing interrupt handler\n" ));
		goto err1;	/* was: return B_ERROR — leaked resources and cookie */
	}

	{
		temp32 = read32(REG_SRR);
		TRACE(( "SRR: %x\n", temp32));
	}

	write32(REG_CR, CR_RXR|CR_TXR);			/* Reset Tx & Rx */

	if ( init_ring_buffers(data) != B_OK )		/* Init ring buffers */
		goto err1;

	write32(REG_RFCR, RFCR_RFEN|RFCR_AAB|RFCR_AAM|RFCR_AAU);
	write32(REG_RXCFG, RXCFG_ATP|RXCFG_DRTH(31));	/* Set the drth */

	write32(REG_TXCFG, TXCFG_CSI|
			   TXCFG_HBI|
			   TXCFG_ATP|
			   TXCFG_MXDMA_256|
			   TXCFG_FLTH(16)|
			   TXCFG_DRTH(16) );

	write32(REG_IMR, ISR_RXIDLE | ISR_TXOK | ISR_RXOK );

	write32(REG_CR, CR_RXE);			/* Enable Rx */
	write32(REG_IER, 1);				/* Enable interrupts */

	return B_OK;

err1:
	free_resources(data);
	free(data);
	return B_ERROR;
}