int set_ddr_laws(u64 start, u64 sz, enum law_trgt_if id)
{
	immap_t *immap = (immap_t *)CONFIG_SYS_IMMR;
	law83xx_t *ecm = &immap->sysconf.ddrlaw[0];
	u64 start_align, law_sz;
	int law_sz_enc;

	if (start == 0)
		start_align = 1ull << (LAW_SIZE_2G + 1);
	else
		start_align = 1ull << (__ffs64(start) - 1);
	law_sz = min(start_align, sz);
	law_sz_enc = __ilog2_u64(law_sz) - 1;

	/*
	 * Set up LAWBAR for all of DDR.
	 */
	ecm->bar = start & 0xfffff000;
	ecm->ar = (LAWAR_EN | (id << 20) | (LAWAR_SIZE & law_sz_enc));
	debug("DDR:bar=0x%08x\n", ecm->bar);
	debug("DDR:ar=0x%08x\n", ecm->ar);

	/* recalculate size based on what was actually covered by the law */
	law_sz = 1ull << __ilog2_u64(law_sz);

	/* do we still have anything to map */
	sz = sz - law_sz;
	if (sz) {
		start += law_sz;

		start_align = 1ull << (__ffs64(start) - 1);
		law_sz = min(start_align, sz);
		law_sz_enc = __ilog2_u64(law_sz) - 1;
		ecm = &immap->sysconf.ddrlaw[1];
		ecm->bar = start & 0xfffff000;
		ecm->ar = (LAWAR_EN | (id << 20) | (LAWAR_SIZE & law_sz_enc));
		debug("DDR:bar=0x%08x\n", ecm->bar);
		debug("DDR:ar=0x%08x\n", ecm->ar);
	} else {
		return 0;
	}

	/* do we still have anything to map */
	sz = sz - law_sz;
	if (sz)
		return 1;

	return 0;
}
int nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
{
	struct nouveau_sclass *sclass;
	struct nouveau_engine *engine;
	struct nouveau_oclass *oclass;
	int nr = -1, i;
	u64 mask;

	/* classes attached directly to the parent */
	sclass = nv_parent(parent)->sclass;
	while (sclass) {
		if (++nr < size)
			lclass[nr] = sclass->oclass->handle;
		sclass = sclass->sclass;
	}

	/* plus the classes exposed by each engine the parent may use */
	mask = nv_parent(parent)->engine;
	while (i = __ffs64(mask), mask) {
		engine = nouveau_engine(parent, i);
		if (engine && (oclass = engine->sclass)) {
			while (oclass->ofuncs) {
				if (++nr < size)
					lclass[nr] = oclass->handle;
				oclass++;
			}
		}
		mask &= ~(1ULL << i);
	}

	return nr + 1;
}
static int __init xio_hello_init_module(void)
{
	int iov_len = SG_TBL_LEN;

	if (parse_cmdline(&test_config, xio_argv))
		return -EINVAL;

	atomic_set(&module_state, 1);
	init_completion(&cleanup_complete);

	/* set accelio max message vector used */
	xio_set_opt(NULL, XIO_OPTLEVEL_ACCELIO, XIO_OPTNAME_MAX_IN_IOVLEN,
		    &iov_len, sizeof(int));
	xio_set_opt(NULL, XIO_OPTLEVEL_ACCELIO, XIO_OPTNAME_MAX_OUT_IOVLEN,
		    &iov_len, sizeof(int));

	xio_main_th = kthread_create(xio_server_main, xio_argv,
				     "xio-hello-server");
	if (IS_ERR(xio_main_th)) {
		complete(&cleanup_complete);
		return PTR_ERR(xio_main_th);
	}

	if (test_config.cpu_mask) {
		g_test_params.cpu = __ffs64(test_config.cpu_mask);
		pr_info("cpu is %d\n", g_test_params.cpu);
		kthread_bind(xio_main_th, g_test_params.cpu);
	}

	wake_up_process(xio_main_th);

	return 0;
}
static int __init xio_lat_init_module(void)
{
	int opt = 1;

	if (parse_cmdline(&test_config, xio_argv))
		return -EINVAL;

	atomic_set(&module_state, 1);
	init_completion(&cleanup_complete);

	/* disable nagle algorithm for tcp */
	xio_set_opt(NULL, XIO_OPTLEVEL_TCP, XIO_OPTNAME_TCP_NO_DELAY,
		    &opt, sizeof(int));

	xio_main_th = kthread_create(xio_client_main, xio_argv,
				     "xio-hello-client");
	if (IS_ERR(xio_main_th)) {
		complete(&cleanup_complete);
		return PTR_ERR(xio_main_th);
	}

	if (test_config.cpu_mask) {
		cpu = __ffs64(test_config.cpu_mask);
		pr_info("cpu is %d\n", cpu);
		kthread_bind(xio_main_th, cpu);
	}

	wake_up_process(xio_main_th);

	return 0;
}
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);
	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64 = PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64 |= (u64)val << 32;
		sz64 |= (u64)sz << 32;
		mask64 |= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}
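/*
 * The BAR probe above uses the standard PCI sizing trick: write all-ones to
 * the BAR, read it back, mask off the flag bits, and the decode size is the
 * value of the lowest writable address bit, i.e. 1ULL << __ffs64(sz64).
 * Below is a minimal, hedged userspace sketch of just that arithmetic;
 * bar_size_from_readback() and DEMO_MEM_MASK are illustrative names, not
 * kernel APIs, and the read-back values are made-up examples.
 */
#include <stdint.h>
#include <stdio.h>

/* Low 4 bits of a memory BAR are flag bits, not address bits
 * (mirrors PCI_BASE_ADDRESS_MEM_MASK == ~0x0fUL). */
#define DEMO_MEM_MASK	(~0x0fULL)

/* Size implied by the value read back after writing all-ones to a BAR:
 * the lowest writable address bit gives the decode size. */
static uint64_t bar_size_from_readback(uint64_t readback)
{
	uint64_t bits = readback & DEMO_MEM_MASK;

	if (!bits)
		return 0;	/* BAR not implemented */
	return 1ULL << __builtin_ctzll(bits);
}

int main(void)
{
	/* a read-back of 0xfff00000 means bits [31:20] are writable,
	 * so the region decodes 1 << 20 = 1 MiB */
	printf("0xfff00000 -> %llu bytes\n",
	       (unsigned long long)bar_size_from_readback(0xfff00000ull));
	/* 0xffffc000: lowest writable bit is bit 14, so 16 KiB */
	printf("0xffffc000 -> %llu bytes\n",
	       (unsigned long long)bar_size_from_readback(0xffffc000ull));
	return 0;
}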
static int pp_find_next_peer(struct pp_ctx *pp)
{
	u64 link, out_db;
	int pidx;

	link = ntb_link_is_up(pp->ntb, NULL, NULL);

	/* Find next available peer */
	if (link & pp->nmask)
		pidx = __ffs64(link & pp->nmask);
	else if (link & pp->pmask)
		pidx = __ffs64(link & pp->pmask);
	else
		return -ENODEV;

	out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx));

	spin_lock(&pp->lock);
	pp->out_pidx = pidx;
	pp->out_db = out_db;
	spin_unlock(&pp->lock);

	return 0;
}
int nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
			  struct nouveau_object **pengine,
			  struct nouveau_oclass **poclass)
{
	struct nouveau_sclass *sclass;
	struct nouveau_engine *engine;
	struct nouveau_oclass *oclass;
	u64 mask;

	/* look for the handle among the parent's own classes first */
	sclass = nv_parent(parent)->sclass;
	while (sclass) {
		if ((sclass->oclass->handle & 0xffff) == handle) {
			*pengine = parent->engine;
			*poclass = sclass->oclass;
			return 0;
		}

		sclass = sclass->sclass;
	}

	/* then search the class list of each engine the parent can use */
	mask = nv_parent(parent)->engine;
	while (mask) {
		int i = __ffs64(mask);

		if (nv_iclass(parent, NV_CLIENT_CLASS))
			engine = nv_engine(nv_client(parent)->device);
		else
			engine = nouveau_engine(parent, i);

		if (engine) {
			oclass = engine->sclass;
			while (oclass->ofuncs) {
				if ((oclass->handle & 0xffff) == handle) {
					*pengine = nv_object(engine);
					*poclass = oclass;
					return 0;
				}
				oclass++;
			}
		}

		mask &= ~(1ULL << i);
	}

	return -EINVAL;
}
static int nvkm_udevice_child_get(struct nvkm_object *object, int index,
				  struct nvkm_oclass *oclass)
{
	struct nvkm_udevice *udev = nvkm_udevice(object);
	struct nvkm_device *device = udev->device;
	struct nvkm_engine *engine;
	u64 mask = (1ULL << NVKM_ENGINE_DMAOBJ) |
		   (1ULL << NVKM_ENGINE_FIFO) |
		   (1ULL << NVKM_ENGINE_DISP) |
		   (1ULL << NVKM_ENGINE_PM);
	const struct nvkm_device_oclass *sclass = NULL;
	int i;

	for (; i = __ffs64(mask), mask && !sclass;
	     mask &= ~(1ULL << i)) {
		if (!(engine = nvkm_device_engine(device, i)) ||
		    !(engine->func->base.sclass))
			continue;
		oclass->engine = engine;

		index -= engine->func->base.sclass(oclass, index, &sclass);
	}

	if (!sclass) {
		switch (index) {
		case 0:
			sclass = &nvkm_control_oclass;
			break;
		default:
			return -EINVAL;
		}
		oclass->base = sclass->base;
	}

	oclass->ctor = nvkm_udevice_child_new;
	oclass->priv = sclass;
	return 0;
}
/* Weak fallback for the compiler-emitted 64-bit count-trailing-zeros helper. */
int __weak __ctzdi2(long val)
{
	return __ffs64((u64)val);
}
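/*
 * All of the snippets above rely on the same primitive: __ffs64() returns
 * the 0-based index of the least significant set bit of a nonzero 64-bit
 * value, which is exactly the count of trailing zeros that __ctzdi2() is
 * expected to produce. Below is a minimal, hedged userspace sketch of that
 * equivalence and of the "clear the bit and continue" loop idiom used by the
 * nouveau code; ffs64_demo() is an illustrative stand-in built on the
 * compiler's __builtin_ctzll(), not the kernel helper itself.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Stand-in for __ffs64(): 0-based index of the least significant set bit.
 * Undefined for word == 0, just like the kernel version. */
static inline unsigned long ffs64_demo(uint64_t word)
{
	return (unsigned long)__builtin_ctzll(word);
}

int main(void)
{
	uint64_t masks[] = { 0x1, 0x8, 0x80000000ull, 0x100000000ull,
			     0xff00ull, 1ull << 63 };
	size_t i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
		uint64_t m = masks[i];

		/* index of lowest set bit == number of trailing zero bits */
		assert(ffs64_demo(m) == (unsigned long)__builtin_ctzll(m));
		printf("mask=0x%016llx lowest set bit=%lu\n",
		       (unsigned long long)m, ffs64_demo(m));

		/* the clear-and-continue idiom from the engine-mask loops */
		m &= ~(1ULL << ffs64_demo(m));
		printf("  after clearing: 0x%016llx\n",
		       (unsigned long long)m);
	}
	return 0;
}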