Example No. 1
/*
 * Perform (stime * rtime) / total with reduced chances
 * of multiplication overflows by using smaller factors,
 * namely the quotient and remainder of the division of
 * rtime by total.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
    u64 rem, res, scaled;

    if (rtime >= total) {
        /*
         * Scale up to rtime / total then add
         * the remainder scaled to stime / total.
         */
        res = div64_u64_rem(rtime, total, &rem);
        scaled = stime * res;
        scaled += div64_u64(stime * rem, total);
    } else {
        /*
         * Same in reverse: scale down to total / rtime
         * then subtract that result scaled to
         * the remaining part.
         */
        res = div64_u64_rem(total, rtime, &rem);
        scaled = div64_u64(stime, res);
        scaled -= div64_u64(scaled * rem, total);
    }

    return (__force cputime_t) scaled;
}
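
A minimal user-space sketch of the same quotient/remainder trick, assuming div64_u64() and div64_u64_rem() have their usual quotient-and-remainder semantics (the stand-ins below are illustrations, not the kernel helpers); it covers only the rtime >= total branch.

#include <stdint.h>
#include <stdio.h>

/* User-space stand-ins for the kernel division helpers (assumed semantics). */
static uint64_t div64_u64(uint64_t a, uint64_t b) { return a / b; }
static uint64_t div64_u64_rem(uint64_t a, uint64_t b, uint64_t *rem)
{
	*rem = a % b;
	return a / b;
}

/*
 * stime * (rtime / total): split rtime / total into quotient and remainder
 * so neither multiplication has to use the full 64-bit rtime.
 */
static uint64_t scale_stime_sketch(uint64_t stime, uint64_t rtime, uint64_t total)
{
	uint64_t rem, res;

	res = div64_u64_rem(rtime, total, &rem);
	return stime * res + div64_u64(stime * rem, total);
}

int main(void)
{
	/* 40 units of stime out of 100 total, scaled to 120 units of rtime -> 48. */
	printf("%llu\n", (unsigned long long)scale_stime_sketch(40, 120, 100));
	return 0;
}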
Example No. 2
/**
 * Some iProc SoCs require the SW to configure the outbound address mapping
 *
 * Outbound address translation:
 *
 * iproc_pcie_address = axi_address - axi_offset
 * OARR = iproc_pcie_address
 * OMAP = pci_addr
 *
 * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address
 */
static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
			       u64 pci_addr, resource_size_t size)
{
	struct iproc_pcie_ob *ob = &pcie->ob;
	struct device *dev = pcie->dev;
	unsigned i;
	u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
	u64 remainder;

	if (size > max_size) {
		dev_err(dev,
			"res size %pap exceeds max supported size 0x%llx\n",
			&size, max_size);
		return -EINVAL;
	}

	div64_u64_rem(size, ob->window_size, &remainder);
	if (remainder) {
		dev_err(dev,
			"res size %pap needs to be multiple of window size %pap\n",
			&size, &ob->window_size);
		return -EINVAL;
	}

	if (axi_addr < ob->axi_offset) {
		dev_err(dev, "axi address %pap less than offset %pap\n",
			&axi_addr, &ob->axi_offset);
		return -EINVAL;
	}

	/*
	 * Translate the AXI address to the internal address used by the iProc
	 * PCIe core before programming the OARR
	 */
	axi_addr -= ob->axi_offset;

	for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
		iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_LO, i,
				    lower_32_bits(axi_addr) | OARR_VALID |
				    (ob->set_oarr_size ? 1 : 0));
		iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_HI, i,
				    upper_32_bits(axi_addr));
		iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_LO, i,
				    lower_32_bits(pci_addr));
		iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_HI, i,
				    upper_32_bits(pci_addr));

		size -= ob->window_size;
		if (size == 0)
			break;

		axi_addr += ob->window_size;
		pci_addr += ob->window_size;
	}

	return 0;
}
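
A small user-space sketch of how the loop above splits a region into per-window chunks, printing the (OARR, OMAP) base pair each window would be programmed with; the 128 MiB window size and the addresses are hypothetical, and size is assumed to have already passed the multiple-of-window-size check.

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE	(128ULL << 20)	/* hypothetical 128 MiB window */

/* Print one (axi, pci) base pair per outbound window, mirroring the
 * programming loop in iproc_pcie_setup_ob(); size must be a non-zero
 * multiple of WINDOW_SIZE. */
static void map_ob_windows(uint64_t axi_addr, uint64_t pci_addr, uint64_t size)
{
	unsigned int i;

	for (i = 0; size != 0; i++) {
		printf("window %u: OARR=0x%llx OMAP=0x%llx\n", i,
		       (unsigned long long)axi_addr,
		       (unsigned long long)pci_addr);
		size -= WINDOW_SIZE;
		axi_addr += WINDOW_SIZE;
		pci_addr += WINDOW_SIZE;
	}
}

int main(void)
{
	/* A 256 MiB region starting at AXI 0x60000000 needs two windows. */
	map_ob_windows(0x60000000ULL, 0x0ULL, 2 * WINDOW_SIZE);
	return 0;
}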
Example No. 3
/**
 * Add a bio to a logpack header.
 * Almost the same as walb_logpack_header_add_req().
 * The checksum is not validated.
 *
 * REQ_DISCARD is supported.
 *
 * @lhead log pack header.
 *   lhead->logpack_lsid must be set correctly.
 *   lhead->sector_type must be set correctly.
 * @logpack_lsid lsid of the log pack.
 * @bio bio to add; must be a write and its size must be >= 0.
 *	size == 0 is permitted with flush requests only.
 * @pbs physical block size.
 * @ring_buffer_size ring buffer size [physical block]
 *
 * RETURN:
 *   true on success, or false (you must create a new logpack for the bio).
 */
bool walb_logpack_header_add_bio(
	struct walb_logpack_header *lhead,
	const struct bio *bio,
	unsigned int pbs, u64 ring_buffer_size)
{
	u64 logpack_lsid;
	u64 bio_lsid;
	unsigned int bio_lb, bio_pb;
	u64 padding_pb;
	unsigned int max_n_rec;
	int idx;
	bool is_discard;
	UNUSED const char no_more_bio_msg[] = "no more bio can be added.\n";

	ASSERT(lhead);
	ASSERT(lhead->sector_type == SECTOR_TYPE_LOGPACK);
	ASSERT(bio);
	ASSERT_PBS(pbs);
	ASSERT(bio->bi_rw & REQ_WRITE);
	ASSERT(ring_buffer_size > 0);

	logpack_lsid = lhead->logpack_lsid;
	max_n_rec = max_n_log_record_in_sector(pbs);
	idx = lhead->n_records;

	ASSERT(lhead->n_records <= max_n_rec);
	if (lhead->n_records == max_n_rec) {
		LOG_(no_more_bio_msg);
		return false;
	}

	bio_lsid = logpack_lsid + 1 + lhead->total_io_size;
	bio_lb = bio_sectors(bio);
	if (bio_lb == 0) {
		/* Only flush requests can have zero-size. */
		ASSERT(bio->bi_rw & REQ_FLUSH);
		/* Currently a zero-flush must be alone. */
		ASSERT(idx == 0);
		return true;
	}
	ASSERT(0 < bio_lb);
	bio_pb = capacity_pb(pbs, bio_lb);
	is_discard = ((bio->bi_rw & REQ_DISCARD) != 0);
	if (!is_discard)
		ASSERT(bio_lb <= WALB_MAX_NORMAL_IO_SECTORS);

	/* Padding check. */
	{
		u64 rem;
		div64_u64_rem(bio_lsid, ring_buffer_size, &rem);
		padding_pb = ring_buffer_size - rem;
	}
	if (!is_discard && padding_pb < bio_pb) {
		/* The log of this request would cross the end of the ring
		   buffer, so padding is required. */
		u64 cap_lb;

		if (lhead->total_io_size + padding_pb
			> MAX_TOTAL_IO_SIZE_IN_LOGPACK_HEADER) {
			LOG_(no_more_bio_msg);
			return false;
		}

		/* Fill the padding record contents. */
		set_bit_u32(LOG_RECORD_PADDING, &lhead->record[idx].flags);
		set_bit_u32(LOG_RECORD_EXIST, &lhead->record[idx].flags);
		lhead->record[idx].lsid = bio_lsid;
		ASSERT(bio_lsid - logpack_lsid <= UINT16_MAX);
		lhead->record[idx].lsid_local = (u16)(bio_lsid - logpack_lsid);
		lhead->record[idx].offset = 0;
		cap_lb = capacity_lb(pbs, padding_pb);
		ASSERT(cap_lb <= UINT16_MAX);
		lhead->record[idx].io_size = (u16)cap_lb;
		lhead->n_padding++;
		lhead->n_records++;
		lhead->total_io_size += padding_pb;

		bio_lsid += padding_pb;
		idx++;
		ASSERT(bio_lsid == logpack_lsid + 1 + lhead->total_io_size);

		if (lhead->n_records == max_n_rec) {
			/* The last record is padding. */
			LOG_(no_more_bio_msg);
			return false;
		}
	}

	if (!is_discard &&
		lhead->total_io_size + bio_pb
		> MAX_TOTAL_IO_SIZE_IN_LOGPACK_HEADER) {
		LOG_(no_more_bio_msg);
		return false;
	}

	/* Fill the log record contents. */
	set_bit_u32(LOG_RECORD_EXIST, &lhead->record[idx].flags);
	clear_bit_u32(LOG_RECORD_PADDING, &lhead->record[idx].flags);
	lhead->record[idx].lsid = bio_lsid;
	lhead->record[idx].lsid_local = (u16)(bio_lsid - logpack_lsid);
	lhead->record[idx].offset = (u64)bio->bi_iter.bi_sector;
	lhead->record[idx].io_size = (u32)bio_lb;
	lhead->n_records++;
	if (is_discard) {
		set_bit_u32(LOG_RECORD_DISCARD, &lhead->record[idx].flags);
		/* For discard, lhead->total_io_size is not increased. */
	} else {
		clear_bit_u32(LOG_RECORD_DISCARD, &lhead->record[idx].flags);
		lhead->total_io_size += bio_pb;
	}
	return true;
}
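
A sketch of the padding decision used above, under the assumption that the only question is whether an IO of io_pb physical blocks starting at bio_lsid would wrap past the end of the ring buffer; needs_padding() is a hypothetical helper for illustration, not part of the walb sources.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Return true when the IO would cross the ring-buffer end, and report how
 * many physical blocks of padding are needed to push it to the start. */
static bool needs_padding(uint64_t bio_lsid, uint64_t io_pb,
			  uint64_t ring_buffer_size, uint64_t *padding_pb)
{
	*padding_pb = ring_buffer_size - bio_lsid % ring_buffer_size;
	return *padding_pb < io_pb;
}

int main(void)
{
	uint64_t pad;

	/* Only 8 blocks remain before the ring end, but the IO needs 16,
	 * so an 8-block padding record is inserted first. */
	if (needs_padding(992, 16, 1000, &pad))
		printf("insert padding of %llu blocks\n", (unsigned long long)pad);
	return 0;
}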