/**
 * dwc3_frame_length_adjustment - Adjusts frame length if required
 * @dwc: Pointer to our controller context structure
 * @fladj: Value of GFLADJ_30MHZ and GFLADJ_REFCLK_FLADJ to adjust frame length
 * @refclk_fladj: Boolean to update GFLADJ_REFCLK_FLADJ field also
 */
static void dwc3_frame_length_adjustment(struct dwc3 *dwc, u32 fladj,
					 bool refclk_fladj)
{
	u32 reg;
	u32 dft;

	/* GFLADJ only exists on 2.50a and newer cores */
	if (dwc->revision < DWC3_REVISION_250A)
		return;

	/* nothing requested */
	if (fladj == 0)
		return;

	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
	if (refclk_fladj) {
		if (!dev_WARN_ONCE(dwc->dev,
				   ((reg & DWC3_GFLADJ_REFCLK_FLADJ) ==
				    (fladj & DWC3_GFLADJ_REFCLK_FLADJ)),
				   "refclk fladj request value same as default, ignoring\n")) {
			reg &= ~DWC3_GFLADJ_REFCLK_FLADJ;
			reg |= (fladj & DWC3_GFLADJ_REFCLK_FLADJ);
			/*
			 * Commit the REFCLK_FLADJ update immediately:
			 * otherwise it would be silently lost whenever the
			 * 30MHZ field below already matches the requested
			 * value and the second write is skipped.
			 */
			dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
		}
	}

	dft = reg & DWC3_GFLADJ_30MHZ_MASK;
	if (!dev_WARN_ONCE(dwc->dev, dft == fladj,
			   "request value same as default, ignoring\n")) {
		reg &= ~DWC3_GFLADJ_30MHZ_MASK;
		reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | fladj;
		dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
	}
}
static int nd_namespace_label_update(struct nd_region *nd_region, struct device *dev) { dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim, "namespace must be idle during label update\n"); if (dev->driver || to_ndns(dev)->claim) return 0; /* * Only allow label writes that will result in a valid namespace * or deletion of an existing namespace. */ if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); resource_size_t size = resource_size(&nspm->nsio.res); if (size == 0 && nspm->uuid) /* delete allocation */; else if (!nspm->uuid) return 0; return nd_pmem_namespace_label_update(nd_region, nspm, size); } else if (is_namespace_blk(dev)) { struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); resource_size_t size = nd_namespace_blk_size(nsblk); if (size == 0 && nsblk->uuid) /* delete allocation */; else if (!nsblk->uuid || !nsblk->lbasize) return 0; return nd_blk_namespace_label_update(nd_region, nsblk, size); } else return -ENXIO; }
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, char *name) { struct nd_region *nd_region = to_nd_region(ndns->dev.parent); const char *suffix = NULL; if (ndns->claim) { if (is_nd_btt(ndns->claim)) suffix = "s"; else if (is_nd_pfn(ndns->claim)) suffix = "m"; else dev_WARN_ONCE(&ndns->dev, 1, "unknown claim type by %s\n", dev_name(ndns->claim)); } if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) { if (!suffix && pmem_should_map_pages(&ndns->dev)) suffix = "m"; sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : ""); } else if (is_namespace_blk(&ndns->dev)) { struct nd_namespace_blk *nsblk; nsblk = to_nd_namespace_blk(&ndns->dev); sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id, suffix ? suffix : ""); } else { return NULL; } return name; }
/*
 * Translate a linear namespace offset into a device (dimm) address by
 * walking the namespace's discontiguous resource ranges.  Returns
 * SIZE_MAX when the request is out of range or straddles a range
 * boundary (an I/O must fit entirely within one resource).
 */
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
		resource_size_t ns_offset, unsigned int len)
{
	int i;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *res = nsblk->res[i];
		resource_size_t span = resource_size(res);

		if (ns_offset >= span) {
			/* not in this range; carry the remainder forward */
			ns_offset -= span;
			continue;
		}
		if (ns_offset + len > span) {
			/* request crosses a resource boundary */
			dev_WARN_ONCE(&nsblk->common.dev, 1,
					"illegal request\n");
			return SIZE_MAX;
		}
		return res->start + ns_offset;
	}

	dev_WARN_ONCE(&nsblk->common.dev, 1, "request out of range\n");
	return SIZE_MAX;
}
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns) { struct nd_namespace_common *ndns = *_ndns; dev_WARN_ONCE(dev, !mutex_is_locked(&ndns->dev.mutex) || ndns->claim != dev, "%s: invalid claim\n", __func__); ndns->claim = NULL; *_ndns = NULL; put_device(&ndns->dev); }
static void __nd_btt_detach_ndns(struct nd_btt *nd_btt) { struct nd_namespace_common *ndns = nd_btt->ndns; dev_WARN_ONCE(&nd_btt->dev, !mutex_is_locked(&ndns->dev.mutex) || ndns->claim != &nd_btt->dev, "%s: invalid claim\n", __func__); ndns->claim = NULL; nd_btt->ndns = NULL; put_device(&ndns->dev); }
/*
 * Walk up the device hierarchy from @nd_dev until the nvdimm bus device
 * is found (identified by its release callback).  Returns NULL, after a
 * one-time warning, when @nd_dev is not parented to an nvdimm bus.
 */
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
{
	struct device *dev = nd_dev;

	while (dev && dev->release != nvdimm_bus_release)
		dev = dev->parent;

	dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
	return dev ? to_nvdimm_bus(dev) : NULL;
}
/*
 * Record @dev as the claimer of @attach and publish it through @_ndns,
 * taking a device reference.  Fails (returns false) when the namespace
 * is already claimed.  Must be called with the namespace device lock
 * held and with @_ndns not yet populated.
 */
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	bool claim_ok;

	if (attach->claim)
		return false;

	claim_ok = mutex_is_locked(&attach->dev.mutex) && !*_ndns;
	dev_WARN_ONCE(dev, !claim_ok, "%s: invalid claim\n", __func__);

	attach->claim = dev;
	*_ndns = attach;
	get_device(&attach->dev);
	return true;
}
static bool __nd_btt_attach_ndns(struct nd_btt *nd_btt, struct nd_namespace_common *ndns) { if (ndns->claim) return false; dev_WARN_ONCE(&nd_btt->dev, !mutex_is_locked(&ndns->dev.mutex) || nd_btt->ndns, "%s: invalid claim\n", __func__); ndns->claim = &nd_btt->dev; nd_btt->ndns = ndns; get_device(&ndns->dev); return true; }
/*
 * Read or write @size bytes at @offset within the pmem device backing
 * @ndns.  Writes are followed by a persistence barrier.  Returns 0 on
 * success or -EFAULT for an out-of-range request.
 */
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw != READ) {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		/* ensure the write is durable before reporting success */
		wmb_pmem();
		return 0;
	}

	memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	return 0;
}
static void devm_memremap_pages_release(struct device *dev, void *data) { struct page_map *page_map = data; struct resource *res = &page_map->res; resource_size_t align_start, align_size; struct dev_pagemap *pgmap = &page_map->pgmap; if (percpu_ref_tryget_live(pgmap->ref)) { dev_WARN(dev, "%s: page mapping is still live!\n", __func__); percpu_ref_put(pgmap->ref); } /* pages are dead and unused, undo the arch mapping */ align_start = res->start & ~(SECTION_SIZE - 1); align_size = ALIGN(resource_size(res), SECTION_SIZE); arch_remove_memory(align_start, align_size); pgmap_radix_release(res); dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc, "%s: failed to free all reserved pages\n", __func__); }
/* * dwc3_frame_length_adjustment - Adjusts frame length if required * @dwc3: Pointer to our controller context structure */ static void dwc3_frame_length_adjustment(struct dwc3 *dwc) { u32 reg; u32 dft; if (dwc->revision < DWC3_REVISION_250A) return; if (dwc->fladj == 0) return; reg = dwc3_readl(dwc->regs, DWC3_GFLADJ); dft = reg & DWC3_GFLADJ_30MHZ_MASK; if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj, "request value same as default, ignoring\n")) { reg &= ~DWC3_GFLADJ_30MHZ_MASK; reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj; dwc3_writel(dwc->regs, DWC3_GFLADJ, reg); } }
/*
 * sysfs show: number of label slots still available on this dimm,
 * keeping one slot in reserve.  Returns -ENXIO when the dimm has no
 * driver data (not yet probed).
 */
static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	/*
	 * Reserve one slot; report 0 (with a one-time warning) if even
	 * the reserved slot is gone.  Equivalent to the classic unsigned
	 * underflow test "nfree - 1 > nfree", which is true iff nfree == 0.
	 */
	if (nfree == 0)
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
	else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);

	return rc;
}
/** * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ * @dev: Device entry * @irq: Device wake-up capable interrupt * @wirq: Wake irq specific data * * Internal function to attach either a device IO interrupt or a * dedicated wake-up interrupt as a wake IRQ. */ static int dev_pm_attach_wake_irq(struct device *dev, int irq, struct wake_irq *wirq) { unsigned long flags; if (!dev || !wirq) return -EINVAL; spin_lock_irqsave(&dev->power.lock, flags); if (dev_WARN_ONCE(dev, dev->power.wakeirq, "wake irq already initialized\n")) { spin_unlock_irqrestore(&dev->power.lock, flags); return -EEXIST; } dev->power.wakeirq = wirq; device_wakeup_attach_irq(dev, wirq); spin_unlock_irqrestore(&dev->power.lock, flags); return 0; }
/*
 * Read or write @size bytes at @offset within an io-mode namespace.
 * Reads are checked against the badblocks list (at 512-byte sector
 * granularity) and fail with -EIO on poison; writes are followed by a
 * region flush.  Returns -EFAULT for an out-of-range request.
 */
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		/* widen to full sectors for the badblocks lookup */
		unsigned int span = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, span)))
			return -EIO;
		return memcpy_from_pmem(buf, nsio->addr + offset, size);
	}

	memcpy_to_pmem(nsio->addr + offset, buf, size);
	nvdimm_flush(to_nd_region(ndns->dev.parent));
	return 0;
}
/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
 *
 * Returns the number of messages transferred on success, or a negative
 * errno (-ESHUTDOWN while suspended, -ETIMEDOUT on controller timeout,
 * -EIO / abort-specific codes on bus errors).
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/* refuse to touch the hardware while the device is suspended */
	if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
		ret = -ESHUTDOWN;
		goto done_nolock;
	}

	/* reset per-transfer state before starting the state machine */
	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = STATUS_IDLE;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		ret = -ETIMEDOUT;
		goto done;
	}

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer. Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	/* balance pm_runtime_get_sync() taken at entry */
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}