/** * vpbe_initialize() - Initialize the vpbe display controller * @dev: ptr to the master device * @vpbe_dev: vpbe device ptr * * The master frame buffer device driver calls this to initialize the vpbe * display controller. It then registers the v4l2 device and the sub-devices * and sets a current encoder sub-device for display. The v4l2 display device * driver is the master and the frame buffer display device driver is the * slave. The frame buffer display driver checks the initialized flag during * probe and exits if the controller is not initialized. Returns status. */ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev) { struct encoder_config_info *enc_info; struct amp_config_info *amp_info; struct v4l2_subdev **enc_subdev; struct osd_state *osd_device; struct i2c_adapter *i2c_adap; int num_encoders; int ret = 0; int err; int i; /* * v4l2 and FBDev frame buffer devices will get the vpbe_dev pointer * from the platform device by iteration of platform drivers and * matching with device name */ if (NULL == vpbe_dev || NULL == dev) { printk(KERN_ERR "Null device pointers.\n"); return -ENODEV; } if (vpbe_dev->initialized) return 0; mutex_lock(&vpbe_dev->lock); if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) { /* We have dac clock available for platform */ vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac"); if (IS_ERR(vpbe_dev->dac_clk)) { ret = PTR_ERR(vpbe_dev->dac_clk); goto fail_mutex_unlock; } if (clk_prepare_enable(vpbe_dev->dac_clk)) { ret = -ENODEV; goto fail_mutex_unlock; } } /* first enable vpss clocks */ vpss_enable_clock(VPSS_VPBE_CLOCK, 1); /* First register a v4l2 device */ ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev); if (ret) { v4l2_err(dev->driver, "Unable to register v4l2 device.\n"); goto fail_clk_put; } v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n"); err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev, platform_device_get); if (err < 0) { ret = err; goto fail_dev_unregister; } vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev, vpbe_dev->cfg->venc.module_name); /* register venc sub device */ if (vpbe_dev->venc == NULL) { v4l2_err(&vpbe_dev->v4l2_dev, "vpbe unable to init venc sub device\n"); ret = -ENODEV; goto fail_dev_unregister; } /* initialize osd device */ osd_device = vpbe_dev->osd_device; if (NULL != osd_device->ops.initialize) { err = osd_device->ops.initialize(osd_device); if (err) { v4l2_err(&vpbe_dev->v4l2_dev, "unable to initialize the OSD device"); ret = -ENOMEM; goto fail_dev_unregister; } } /* * Register any external encoders that are configured. At index 0 we * store venc sd index. */ num_encoders = vpbe_dev->cfg->num_ext_encoders + 1; vpbe_dev->encoders = kmalloc( sizeof(struct v4l2_subdev *)*num_encoders, GFP_KERNEL); if (NULL == vpbe_dev->encoders) { v4l2_err(&vpbe_dev->v4l2_dev, "unable to allocate memory for encoders sub devices"); ret = -ENOMEM; goto fail_dev_unregister; } i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id); for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) { if (i == 0) { /* venc is at index 0 */ enc_subdev = &vpbe_dev->encoders[i]; *enc_subdev = vpbe_dev->venc; continue; } enc_info = &vpbe_dev->cfg->ext_encoders[i]; if (enc_info->is_i2c) { enc_subdev = &vpbe_dev->encoders[i]; *enc_subdev = v4l2_i2c_new_subdev_board( &vpbe_dev->v4l2_dev, i2c_adap, &enc_info->board_info, NULL); if (*enc_subdev) v4l2_info(&vpbe_dev->v4l2_dev, "v4l2 sub device %s registered\n", enc_info->module_name); else { v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s" " failed to register", enc_info->module_name); ret = -ENODEV; goto fail_kfree_encoders; } } else v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders" " currently not supported"); } /* Add amplifier subdevice for dm365 */ if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) && vpbe_dev->cfg->amp != NULL) { amp_info = vpbe_dev->cfg->amp; if (amp_info->is_i2c) { vpbe_dev->amp = v4l2_i2c_new_subdev_board( &vpbe_dev->v4l2_dev, i2c_adap, &amp_info->board_info, NULL); if (!vpbe_dev->amp) { v4l2_err(&vpbe_dev->v4l2_dev, "amplifier %s failed to register", amp_info->module_name); ret = -ENODEV; goto fail_kfree_encoders; } v4l2_info(&vpbe_dev->v4l2_dev, "v4l2 sub device %s registered\n", amp_info->module_name); } else { vpbe_dev->amp = NULL; v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers" " currently not supported"); } } else { vpbe_dev->amp = NULL; } /* set the current encoder and output to that of venc by default */ vpbe_dev->current_sd_index = 0; vpbe_dev->current_out_index = 0; mutex_unlock(&vpbe_dev->lock); printk(KERN_NOTICE "Setting default output to %s\n", def_output); ret = vpbe_set_default_output(vpbe_dev); if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s", def_output); return ret; } printk(KERN_NOTICE "Setting default mode to %s\n", def_mode); ret = vpbe_set_default_mode(vpbe_dev); if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s", def_mode); return ret; } vpbe_dev->initialized = 1; /* TBD handling of bootargs for default output and mode */ return 0; fail_kfree_encoders: kfree(vpbe_dev->encoders); fail_dev_unregister: v4l2_device_unregister(&vpbe_dev->v4l2_dev); fail_clk_put: if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) { clk_disable_unprepare(vpbe_dev->dac_clk); clk_put(vpbe_dev->dac_clk); } fail_mutex_unlock: mutex_unlock(&vpbe_dev->lock); return ret; }
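The kernel-doc above describes the master/slave split between the v4l2 display driver and the frame buffer driver only in prose. A minimal sketch of how a slave fbdev probe could honour the initialized flag is shown below; vpbe_fb_probe() and the use of platform_data to hand over the vpbe_device pointer are hypothetical illustrations, not the actual davinci fbdev driver.

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <media/davinci/vpbe.h>

/*
 * Hypothetical slave-side probe: give up (or defer) until the display
 * controller has been brought up by vpbe_initialize() on the v4l2 side.
 */
static int vpbe_fb_probe(struct platform_device *pdev)
{
	struct vpbe_device *vpbe_dev = pdev->dev.platform_data;

	if (!vpbe_dev)
		return -ENODEV;

	/* vpbe_initialize() sets this flag once the v4l2 side is ready */
	if (!vpbe_dev->initialized)
		return -EPROBE_DEFER;

	/* ... register the frame buffer against vpbe_dev here ... */
	return 0;
}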
static void mcb_devices_unregister(struct mcb_bus *bus) { bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_devices_unregister); }
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops) { bus_register_notifier(bus, &iommu_device_nb); bus_for_each_dev(bus, NULL, NULL, add_iommu_group); }
int dss_resume_all_devices(void) { struct bus_type *bus = dss_get_bus(); return bus_for_each_dev(bus, NULL, NULL, dss_resume_device); }
/** * driver_attach - try to bind driver to devices. * @drv: driver. * * Walk the list of devices that the bus has on it and try to * match the driver with each one. If driver_probe_device() * returns 0 and the @dev->driver is set, we've found a * compatible pair. */ int driver_attach(struct device_driver *drv) { return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach); }
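driver_attach() above leans on the bus_for_each_dev() contract: the callback receives each device plus the opaque data pointer, and its return value lets the walk either continue (zero) or stop early (nonzero, which bus_for_each_dev() then returns). A minimal sketch of the accumulate-through-the-data-pointer side of that contract, with a made-up counting callback:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/printk.h>

/* Count every device on the bus; returning 0 keeps the walk going. */
static int count_one_dev(struct device *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	pr_debug("visited %s\n", dev_name(dev));
	return 0;
}

static unsigned int count_platform_devices(void)
{
	unsigned int count = 0;

	/* start == NULL means "walk from the beginning of the bus list" */
	bus_for_each_dev(&platform_bus_type, NULL, &count, count_one_dev);
	return count;
}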
void __kprobes mca_handle_nmi(void) { bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback); }
static uint32_t register_client_legacy(struct msm_bus_scale_pdata *pdata) { struct msm_bus_client *client = NULL; int i; int src, dest, nfab; struct msm_bus_fabric_device *deffab; deffab = msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT); if (!deffab) { MSM_BUS_ERR("Error finding default fabric\n"); return 0; } nfab = msm_bus_get_num_fab(); if (nfab < deffab->board_algo->board_nfab) { MSM_BUS_ERR("Can't register client!\n" "Num of fabrics up: %d\n", nfab); return 0; } if (IS_ERR_OR_NULL(pdata) || (pdata->usecase->num_paths == 0)) { MSM_BUS_ERR("Cannot register client with null data\n"); return 0; } client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL); if (!client) { MSM_BUS_ERR("Error allocating client\n"); return 0; } mutex_lock(&msm_bus_lock); client->pdata = pdata; client->curr = -1; for (i = 0; i < pdata->usecase->num_paths; i++) { int *pnode; struct msm_bus_fabric_device *srcfab; pnode = krealloc(client->src_pnode, ((i + 1) * sizeof(int)), GFP_KERNEL); if (ZERO_OR_NULL_PTR(pnode)) { MSM_BUS_ERR("Invalid Pnode ptr!\n"); continue; } else client->src_pnode = pnode; if (!IS_MASTER_VALID(pdata->usecase->vectors[i].src)) { MSM_BUS_ERR("Invalid Master ID %d in request!\n", pdata->usecase->vectors[i].src); goto err; } if (!IS_SLAVE_VALID(pdata->usecase->vectors[i].dst)) { MSM_BUS_ERR("Invalid Slave ID %d in request!\n", pdata->usecase->vectors[i].dst); goto err; } src = msm_bus_board_get_iid(pdata->usecase->vectors[i].src); if (src == -ENXIO) { MSM_BUS_ERR("Master %d not supported. Client cannot be" " registered\n", pdata->usecase->vectors[i].src); goto err; } dest = msm_bus_board_get_iid(pdata->usecase->vectors[i].dst); if (dest == -ENXIO) { MSM_BUS_ERR("Slave %d not supported. Client cannot be" " registered\n", pdata->usecase->vectors[i].dst); goto err; } srcfab = msm_bus_get_fabric_device(GET_FABID(src)); if (!srcfab) { MSM_BUS_ERR("Fabric not found\n"); goto err; } srcfab->visited = true; pnode[i] = getpath(src, dest); bus_for_each_dev(&msm_bus_type, NULL, NULL, clearvisitedflag); if (pnode[i] == -ENXIO) { MSM_BUS_ERR("Cannot register client now! Try again!\n"); goto err; } } msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER, (uint32_t)client); mutex_unlock(&msm_bus_lock); MSM_BUS_DBG("ret: %u num_paths: %d\n", (uint32_t)client, pdata->usecase->num_paths); return (uint32_t)(client); err: kfree(client->src_pnode); kfree(client); mutex_unlock(&msm_bus_lock); return 0; }
static void pr_spi_devices(void) { pr_info(DRVNAME": SPI devices registered:\n"); bus_for_each_dev(&spi_bus_type, NULL, NULL, spi_device_found); pr_info(DRVNAME":\n"); }
int stm_check_wakeup_devices(struct stm_wakeup_devices *wkd) { stm_wake_init(wkd); bus_for_each_dev(&platform_bus_type, NULL, wkd, __check_wakeup_device); return 0; }
int msm_bus_device_remove(struct platform_device *pdev) { bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev); return 0; }
static int exists_disconnected_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, is_disconnected_device); }
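Because bus_for_each_dev() stops at the first nonzero return from the callback and propagates that value, helpers like exists_disconnected_device() above can use it as a search primitive rather than a plain iterator. The same idiom, sketched with a hypothetical match-by-name callback over the platform bus:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>

/* Returning nonzero stops the iteration as soon as a match is found. */
static int match_dev_name(struct device *dev, void *data)
{
	const char *wanted = data;

	return strcmp(dev_name(dev), wanted) == 0;
}

/* True if a platform device with the given name is registered. */
static bool platform_dev_exists(const char *name)
{
	return bus_for_each_dev(&platform_bus_type, NULL, (void *)name,
				match_dev_name) > 0;
}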
static int msm_bus_device_probe(struct platform_device *pdev) { unsigned int i, ret; struct msm_bus_device_node_registration *pdata; /* If possible, get pdata from device-tree */ if (pdev->dev.of_node) pdata = msm_bus_of_to_pdata(pdev); else { pdata = (struct msm_bus_device_node_registration *)pdev-> dev.platform_data; } if (IS_ERR_OR_NULL(pdata)) { MSM_BUS_ERR("No platform data found"); ret = -ENODATA; goto exit_device_probe; } for (i = 0; i < pdata->num_devices; i++) { struct device *node_dev = NULL; node_dev = msm_bus_device_init(&pdata->info[i]); if (!node_dev) { MSM_BUS_ERR("%s: Error during dev init for %d", __func__, pdata->info[i].node_info->id); ret = -ENXIO; goto exit_device_probe; } ret = msm_bus_init_clk(node_dev, &pdata->info[i]); /* Is this a fabric device? */ if (pdata->info[i].node_info->is_fab_dev) { MSM_BUS_DBG("%s: %d is a fab", __func__, pdata->info[i].node_info->id); ret = msm_bus_fabric_init(node_dev, &pdata->info[i]); if (ret) { MSM_BUS_ERR("%s: Error initializing fab %d", __func__, pdata->info[i].node_info->id); goto exit_device_probe; } } } ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_setup_dev_conn); if (ret) { MSM_BUS_ERR("%s: Error setting up dev connections", __func__); goto exit_device_probe; } ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos); if (ret) { MSM_BUS_ERR("%s: Error during qos init", __func__); goto exit_device_probe; } bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug); /* Register the arb layer ops */ msm_bus_arb_setops_adhoc(&arb_ops); devm_kfree(&pdev->dev, pdata->info); devm_kfree(&pdev->dev, pdata); exit_device_probe: return ret; }
/** * driver_attach - try to bind driver to devices. * @drv: driver. * * Walk the list of devices that the bus has on it and try to * match the driver with each one. If driver_probe_device() * returns 0 and the @dev->driver is set, we've found a * compatible pair. */ void driver_attach(struct device_driver * drv) { bus_for_each_dev(drv->bus, NULL, drv, __driver_attach); }
/** * mcb_bus_add_devices() - Add devices in the bus' internal device list * @bus: The @mcb_bus whose devices are added * * Add devices in the bus' internal device list to the system. */ void mcb_bus_add_devices(const struct mcb_bus *bus) { bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices); bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_child); }
/** * msm_bus_scale_client_update_request() - Update the request for bandwidth * from a particular client * * cl: Handle to the client * index: Index into the vector, to which the bw and clock values need to be * updated */ int msm_bus_scale_client_update_request(uint32_t cl, unsigned index) { int i, ret = 0; struct msm_bus_scale_pdata *pdata; int pnode, src, curr, ctx; unsigned long req_clk, req_bw, curr_clk, curr_bw; struct msm_bus_client *client = (struct msm_bus_client *)cl; if (IS_ERR(client)) { MSM_BUS_ERR("msm_bus_scale_client update req error %d\n", (uint32_t)client); return -ENXIO; } mutex_lock(&msm_bus_lock); if (client->curr == index) goto err; curr = client->curr; pdata = client->pdata; if (index >= pdata->num_usecases) { MSM_BUS_ERR("Client %u passed invalid index: %d\n", (uint32_t)client, index); ret = -ENXIO; goto err; } MSM_BUS_DBG("cl: %u index: %d curr: %d" " num_paths: %d\n", cl, index, client->curr, client->pdata->usecase->num_paths); for (i = 0; i < pdata->usecase->num_paths; i++) { src = msm_bus_board_get_iid(client->pdata->usecase[index]. vectors[i].src); if (src == -ENXIO) { MSM_BUS_ERR("Master %d not supported. Request cannot" " be updated\n", client->pdata->usecase-> vectors[i].src); goto err; } if (msm_bus_board_get_iid(client->pdata->usecase[index]. vectors[i].dst) == -ENXIO) { MSM_BUS_ERR("Slave %d not supported. Request cannot" " be updated\n", client->pdata->usecase-> vectors[i].dst); } pnode = client->src_pnode[i]; req_clk = client->pdata->usecase[index].vectors[i].ib; req_bw = client->pdata->usecase[index].vectors[i].ab; if (curr < 0) { curr_clk = 0; curr_bw = 0; } else { curr_clk = client->pdata->usecase[curr].vectors[i].ib; curr_bw = client->pdata->usecase[curr].vectors[i].ab; MSM_BUS_DBG("ab: %lu ib: %lu\n", curr_bw, curr_clk); } if (!pdata->active_only) { ret = update_path(src, pnode, req_clk, req_bw, curr_clk, curr_bw, 0, pdata->active_only); if (ret) { MSM_BUS_ERR("Update path failed! %d\n", ret); goto err; } } ret = update_path(src, pnode, req_clk, req_bw, curr_clk, curr_bw, ACTIVE_CTX, pdata->active_only); if (ret) { MSM_BUS_ERR("Update Path failed! %d\n", ret); goto err; } } client->curr = index; ctx = ACTIVE_CTX; msm_bus_dbg_client_data(client->pdata, index, cl); bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_fn); err: mutex_unlock(&msm_bus_lock); return ret; }
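The comment above documents the handle/index interface only in prose. A hedged sketch of the client side might look like the following; the header names, the master/slave IDs, and msm_bus_scale_register_client() as the registration entry point are assumptions inferred from the msm_bus_scale_pdata layout used in this file, not verified against a particular tree:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>

/* Two usecases over one path: an idle vote (index 0) and an active vote. */
static struct msm_bus_vectors demo_idle_vectors[] = {
	{ .src = MSM_BUS_MASTER_GRAPHICS_3D, .dst = MSM_BUS_SLAVE_EBI_CH0,
	  .ab = 0, .ib = 0 },
};

static struct msm_bus_vectors demo_active_vectors[] = {
	{ .src = MSM_BUS_MASTER_GRAPHICS_3D, .dst = MSM_BUS_SLAVE_EBI_CH0,
	  .ab = 100000000ULL, .ib = 200000000ULL },
};

static struct msm_bus_paths demo_usecases[] = {
	{ .num_paths = ARRAY_SIZE(demo_idle_vectors),
	  .vectors = demo_idle_vectors },
	{ .num_paths = ARRAY_SIZE(demo_active_vectors),
	  .vectors = demo_active_vectors },
};

static struct msm_bus_scale_pdata demo_pdata = {
	.usecase = demo_usecases,
	.num_usecases = ARRAY_SIZE(demo_usecases),
	.name = "demo-client",
};

/* Register once, then vote by switching between usecase indices. */
static int demo_vote_bandwidth(void)
{
	uint32_t cl = msm_bus_scale_register_client(&demo_pdata);

	if (!cl)
		return -EINVAL;

	return msm_bus_scale_client_update_request(cl, 1);
}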
void hsi_bus_exit(void) { bus_for_each_dev(&hsi_bus_type, NULL, NULL, hsi_bus_unreg_dev); bus_unregister(&hsi_bus_type); }
static int msm_pil_shutdown_at_boot(void) { return bus_for_each_dev(&pil_bus_type, NULL, NULL, __msm_pil_shutdown); }
static int __init omap_device_late_init(void) { bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle); return 0; }
/** * update_path() - Update the path with the bandwidth and clock values, as * requested by the client. * * @curr: Current source node, as specified in the client vector (master) * @pnode: The first-hop node on the path, stored in the internal client struct * @req_clk: Requested clock value from the vector * @req_bw: Requested bandwidth value from the vector * @curr_clk: Current clock frequency * @curr_bw: Currently allocated bandwidth * * This function updates the nodes on the path calculated using getpath(), with * clock and bandwidth values. The sum of bandwidths, and the max of clock * frequencies is calculated at each node on the path. Commit data to be sent * to RPM for each master and slave is also calculated here. */ static int update_path(int curr, int pnode, uint64_t req_clk, uint64_t req_bw, uint64_t curr_clk, uint64_t curr_bw, unsigned int ctx, unsigned int cl_active_flag) { int index, ret = 0; struct msm_bus_inode_info *info; struct msm_bus_inode_info *src_info; int next_pnode; int64_t add_bw = req_bw - curr_bw; uint64_t bwsum = 0; uint64_t req_clk_hz, curr_clk_hz, bwsum_hz; int *master_tiers; struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device (GET_FABID(curr)); if (!fabdev) { MSM_BUS_ERR("Bus device for bus ID: %d not found!\n", GET_FABID(curr)); return -ENXIO; } MSM_BUS_DBG("args: %d %d %d %llu %llu %llu %llu %u\n", curr, GET_NODE(pnode), GET_INDEX(pnode), req_clk, req_bw, curr_clk, curr_bw, ctx); index = GET_INDEX(pnode); MSM_BUS_DBG("Client passed index :%d\n", index); info = fabdev->algo->find_node(fabdev, curr); if (!info) { MSM_BUS_ERR("Cannot find node info!\n"); return -ENXIO; } src_info = info; info->link_info.sel_bw = &info->link_info.bw[ctx]; info->link_info.sel_clk = &info->link_info.clk[ctx]; *info->link_info.sel_bw += add_bw; info->pnode[index].sel_bw = &info->pnode[index].bw[ctx]; /* * To select the right clock, AND the context with * client active flag. */ info->pnode[index].sel_clk = &info->pnode[index].clk[ctx & cl_active_flag]; *info->pnode[index].sel_bw += add_bw; *info->pnode[index].sel_clk = req_clk; /* * If master supports dual configuration, check if * the configuration needs to be changed based on * incoming requests */ if (info->node_info->dual_conf) { uint64_t node_maxib = 0; node_maxib = get_node_maxib(info); fabdev->algo->config_master(fabdev, info, node_maxib, req_bw); } info->link_info.num_tiers = info->node_info->num_tiers; info->link_info.tier = info->node_info->tier; master_tiers = info->node_info->tier; do { struct msm_bus_inode_info *hop; fabdev = msm_bus_get_fabric_device(GET_FABID(curr)); if (!fabdev) { MSM_BUS_ERR("Fabric not found\n"); return -ENXIO; } MSM_BUS_DBG("id: %d\n", info->node_info->priv_id); /* find next node and index */ next_pnode = info->pnode[index].next; curr = GET_NODE(next_pnode); index = GET_INDEX(next_pnode); MSM_BUS_DBG("id:%d, next: %d\n", info->node_info->priv_id, curr); /* Get hop */ /* check if we are here as gateway, or does the hop belong to * this fabric */ if (IS_NODE(curr)) hop = fabdev->algo->find_node(fabdev, curr); else hop = fabdev->algo->find_gw_node(fabdev, curr); if (!hop) { MSM_BUS_ERR("Null Info found for hop\n"); return -ENXIO; } hop->link_info.sel_bw = &hop->link_info.bw[ctx]; hop->link_info.sel_clk = &hop->link_info.clk[ctx]; *hop->link_info.sel_bw += add_bw; hop->pnode[index].sel_bw = &hop->pnode[index].bw[ctx]; hop->pnode[index].sel_clk = &hop->pnode[index].clk[ctx & cl_active_flag]; if (!hop->node_info->buswidth) { MSM_BUS_WARN("No bus width found. Using default\n"); hop->node_info->buswidth = 8; } *hop->pnode[index].sel_clk = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, req_clk); *hop->pnode[index].sel_bw += add_bw; MSM_BUS_DBG("fabric: %d slave: %d, slave-width: %d info: %d\n", fabdev->id, hop->node_info->priv_id, hop->node_info->buswidth, info->node_info->priv_id); /* Update Bandwidth */ fabdev->algo->update_bw(fabdev, hop, info, add_bw, master_tiers, ctx); bwsum = *hop->link_info.sel_bw; /* Update Fabric clocks */ curr_clk_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, curr_clk); req_clk_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, req_clk); bwsum_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, bwsum); /* Account for multiple channels if any */ if (hop->node_info->num_sports > 1) bwsum_hz = msm_bus_div64(hop->node_info->num_sports, bwsum_hz); MSM_BUS_DBG("AXI: Hop: %d, ports: %d, bwsum_hz: %llu\n", hop->node_info->id, hop->node_info->num_sports, bwsum_hz); MSM_BUS_DBG("up-clk: curr_hz: %llu, req_hz: %llu, bw_hz %llu\n", curr_clk, req_clk, bwsum_hz); ret = fabdev->algo->update_clks(fabdev, hop, index, curr_clk_hz, req_clk_hz, bwsum_hz, SEL_FAB_CLK, ctx, cl_active_flag); if (ret) MSM_BUS_WARN("Failed to update clk\n"); info = hop; } while (GET_NODE(info->pnode[index].next) != info->node_info->priv_id); /* Update BW, clk after exiting the loop for the last one */ if (!info) { MSM_BUS_ERR("Cannot find node info!\n"); return -ENXIO; } /* Update slave clocks */ ret = fabdev->algo->update_clks(fabdev, info, index, curr_clk_hz, req_clk_hz, bwsum_hz, SEL_SLAVE_CLK, ctx, cl_active_flag); if (ret) MSM_BUS_ERR("Failed to update clk\n"); if ((ctx == cl_active_flag) && ((src_info->node_info->nr_lim || src_info->node_info->rt_mas))) setup_nr_limits(curr, pnode); /* If freq is going down, apply the changes now before * we commit clk data. */ if ((req_clk < curr_clk) || (req_bw < curr_bw)) bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_limiter); return ret; }
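update_path() turns bandwidth into a clock request with BW_TO_CLK_FREQ_HZ(buswidth, bw) and then divides by num_sports for multi-ported slaves. Assuming that macro is essentially a 64-bit bandwidth-by-bus-width division (which is how it is used here, but is an assumption about the header), the arithmetic can be sketched as:

#include <linux/math64.h>
#include <linux/types.h>

/*
 * Sketch of the conversion used above: bw bytes/s on a bus that moves
 * `buswidth` bytes per cycle needs roughly bw / buswidth cycles per second.
 */
static u64 demo_bw_to_clk_hz(u64 bw_bytes_per_s, u32 buswidth)
{
	return div_u64(bw_bytes_per_s, buswidth);
}

/*
 * Worked example: 2 GB/s of summed slave bandwidth on an 8-byte-wide,
 * dual-port slave -> 2e9 / 8 = 250 MHz, spread over 2 ports = 125 MHz.
 */
static u64 demo_slave_clk_hz(void)
{
	return div_u64(demo_bw_to_clk_hz(2000000000ULL, 8), 2);
}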
/** * proc_scsi_show - show contents of /proc/scsi/scsi (attached devices) * @s: output goes here * @p: not used */ static int proc_scsi_show(struct seq_file *s, void *p) { seq_printf(s, "Attached devices:\n"); bus_for_each_dev(&scsi_bus_type, NULL, s, proc_print_scsidevice); return 0; }
static int update_request_legacy(uint32_t cl, unsigned index) { int i, ret = 0; struct msm_bus_scale_pdata *pdata; int pnode, src = 0, curr, ctx; uint64_t req_clk = 0, req_bw = 0, curr_clk = 0, curr_bw = 0; struct msm_bus_client *client = (struct msm_bus_client *)cl; if (IS_ERR_OR_NULL(client)) { MSM_BUS_ERR("msm_bus_scale_client update req error %d\n", (uint32_t)client); return -ENXIO; } mutex_lock(&msm_bus_lock); if (client->curr == index) goto err; curr = client->curr; pdata = client->pdata; if (!pdata) { MSM_BUS_ERR("Null pdata passed to update-request\n"); ret = -ENXIO; goto err; } if (index >= pdata->num_usecases) { MSM_BUS_ERR("Client %u passed invalid index: %d\n", (uint32_t)client, index); ret = -ENXIO; goto err; } MSM_BUS_DBG("cl: %u index: %d curr: %d num_paths: %d\n", cl, index, client->curr, client->pdata->usecase->num_paths); for (i = 0; i < pdata->usecase->num_paths; i++) { src = msm_bus_board_get_iid(client->pdata->usecase[index]. vectors[i].src); if (src == -ENXIO) { MSM_BUS_ERR("Master %d not supported. Request cannot" " be updated\n", client->pdata->usecase-> vectors[i].src); goto err; } if (msm_bus_board_get_iid(client->pdata->usecase[index]. vectors[i].dst) == -ENXIO) { MSM_BUS_ERR("Slave %d not supported. Request cannot" " be updated\n", client->pdata->usecase-> vectors[i].dst); } pnode = client->src_pnode[i]; req_clk = client->pdata->usecase[index].vectors[i].ib; req_bw = client->pdata->usecase[index].vectors[i].ab; if (curr < 0) { curr_clk = 0; curr_bw = 0; } else { curr_clk = client->pdata->usecase[curr].vectors[i].ib; curr_bw = client->pdata->usecase[curr].vectors[i].ab; MSM_BUS_DBG("ab: %llu ib: %llu\n", curr_bw, curr_clk); } if (!pdata->active_only) { ret = update_path(src, pnode, req_clk, req_bw, curr_clk, curr_bw, 0, pdata->active_only); if (ret) { MSM_BUS_ERR("Update path failed! %d\n", ret); goto err; } } ret = update_path(src, pnode, req_clk, req_bw, curr_clk, curr_bw, ACTIVE_CTX, pdata->active_only); if (ret) { MSM_BUS_ERR("Update Path failed! %d\n", ret); goto err; } } client->curr = index; ctx = ACTIVE_CTX; msm_bus_dbg_client_data(client->pdata, index, cl); bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_fn); /* For NR/RT limited masters, if freq is going up , apply the changes * after we commit clk data. */ if (is_nr_lim(src) && ((req_clk > curr_clk) || (req_bw > curr_bw))) bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_limiter); err: mutex_unlock(&msm_bus_lock); return ret; }
/** * msm_bus_scale_client_update_request() - Update the request for bandwidth * from a particular client * * cl: Handle to the client * index: Index into the vector, to which the bw and clock values need to be * updated */ int msm_bus_scale_client_update_request(uint32_t cl, unsigned index) { int i, ret = 0; struct msm_bus_scale_pdata *pdata; int pnode, src, curr, ctx; uint64_t req_clk, req_bw, curr_clk, curr_bw; struct msm_bus_client *client = (struct msm_bus_client *)cl; #ifdef DEBUG_MSM_BUS_ARB_REQ static int log_cnt = 0; #endif if (IS_ERR_OR_NULL(client)) { MSM_BUS_ERR("msm_bus_scale_client update req error %d\n", (uint32_t)client); return -ENXIO; } #ifdef SEC_FEATURE_USE_RT_MUTEX rt_mutex_lock(&msm_bus_lock); #else mutex_lock(&msm_bus_lock); #endif if (client->curr == index) goto err; curr = client->curr; pdata = client->pdata; if (!pdata) { MSM_BUS_ERR("Null pdata passed to update-request\n"); return -ENXIO; } if (index >= pdata->num_usecases) { MSM_BUS_ERR("Client %u passed invalid index: %d\n", (uint32_t)client, index); ret = -ENXIO; goto err; } MSM_BUS_DBG("cl: %u index: %d curr: %d num_paths: %d\n", cl, index, client->curr, client->pdata->usecase->num_paths); for (i = 0; i < pdata->usecase->num_paths; i++) { src = msm_bus_board_get_iid(client->pdata->usecase[index]. vectors[i].src); if (src == -ENXIO) { MSM_BUS_ERR("Master %d not supported. Request cannot" " be updated\n", client->pdata->usecase-> vectors[i].src); goto err; } if (msm_bus_board_get_iid(client->pdata->usecase[index]. vectors[i].dst) == -ENXIO) { MSM_BUS_ERR("Slave %d not supported. Request cannot" " be updated\n", client->pdata->usecase-> vectors[i].dst); } pnode = client->src_pnode[i]; req_clk = client->pdata->usecase[index].vectors[i].ib; req_bw = client->pdata->usecase[index].vectors[i].ab; #ifdef DEBUG_MSM_BUS_ARB_REQ //Debug code to collect client info { struct msm_bus_fabric_device *fabdev_d = msm_bus_get_fabric_device(GET_FABID(src)); if (MSM_BUS_FAB_APPSS == fabdev_d->id) { if (log_cnt >= 1000) log_cnt = 0; log_req[log_cnt].ab = client->pdata->usecase[index].vectors[i].ab; log_req[log_cnt].ib = client->pdata->usecase[index].vectors[i].ib; log_req[log_cnt].src = client->pdata->usecase[index].vectors[i].src; log_req[log_cnt].dst = client->pdata->usecase[index].vectors[i].dst; log_req[log_cnt].cnt = arch_counter_get_cntpct(); strncpy(log_req[log_cnt].name, client->pdata->name, 19); log_cnt++; //printk("*** cl: %s ab: %llu ib: %llu\n", client->pdata->name, req_bw, req_clk); } } #endif if (curr < 0) { curr_clk = 0; curr_bw = 0; } else { curr_clk = client->pdata->usecase[curr].vectors[i].ib; curr_bw = client->pdata->usecase[curr].vectors[i].ab; MSM_BUS_DBG("ab: %llu ib: %llu\n", curr_bw, curr_clk); } if (!pdata->active_only) { ret = update_path(src, pnode, req_clk, req_bw, curr_clk, curr_bw, 0, pdata->active_only); if (ret) { MSM_BUS_ERR("Update path failed! %d\n", ret); goto err; } } ret = update_path(src, pnode, req_clk, req_bw, curr_clk, curr_bw, ACTIVE_CTX, pdata->active_only); if (ret) { MSM_BUS_ERR("Update Path failed! %d\n", ret); goto err; } } client->curr = index; ctx = ACTIVE_CTX; msm_bus_dbg_client_data(client->pdata, index, cl); bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_fn); err: #ifdef SEC_FEATURE_USE_RT_MUTEX rt_mutex_unlock(&msm_bus_lock); #else mutex_unlock(&msm_bus_lock); #endif return ret; }
void dss_disable_all_devices(void) { struct bus_type *bus = dss_get_bus(); bus_for_each_dev(bus, NULL, NULL, dss_disable_device); }
static int exists_non_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, non_essential_device_connecting); }
/** * bus_rescan_devices - rescan devices on the bus for possible drivers * @bus: the bus to scan. * * This function will look for devices on the bus with no driver * attached and rescan them against existing drivers to see if any match, * by calling device_attach() for the unbound devices. */ void bus_rescan_devices(struct bus_type * bus) { bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper); }
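The kernel-doc above describes the mechanism (offer every driverless device to the registered drivers via device_attach()) but the helper itself lives elsewhere. A plausible shape for such a helper, not necessarily the kernel's exact bus_rescan_devices_helper(), is:

#include <linux/device.h>

/*
 * Rescan helper sketch: only devices that are still unbound are offered
 * to the drivers again; a genuine error stops the walk, "no match" does not.
 */
static int demo_rescan_helper(struct device *dev, void *data)
{
	int ret = 0;

	if (!dev->driver)
		ret = device_attach(dev);

	return ret < 0 ? ret : 0;
}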
void xenbus_backend_resume(int (*fn)(struct device *, void *)) { DPRINTK(""); if (!xenbus_backend.error) bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn); }