Example #1
/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                                 void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_mmio_info *mmio;
    unsigned int offset = 0;
    int ret = -EINVAL;

    mutex_lock(&gvt->lock);

    if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
        struct intel_vgpu_guest_page *gp;

        gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
        if (gp) {
            ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                                p_data, bytes);
            if (ret) {
                gvt_err("vgpu%d: guest page read error %d, "
                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                        vgpu->id, ret,
                        gp->gfn, pa, *(u32 *)p_data, bytes);
            }
            mutex_unlock(&gvt->lock);
            return ret;
        }
    }

    offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

    if (WARN_ON(bytes > 8))
        goto err;

    if (reg_is_gtt(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
            goto err;
        if (WARN_ON(bytes != 4 && bytes != 8))
            goto err;
        if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
            goto err;

        ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
                                               p_data, bytes);
        if (ret)
            goto err;
        mutex_unlock(&gvt->lock);
        return ret;
    }

    if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
        ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
    }

    if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
        goto err;

    mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
    if (!mmio && !vgpu->mmio.disable_warn_untrack) {
        gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
                vgpu->id, offset, bytes, *(u32 *)p_data);

        if (offset == 0x206c) {
            gvt_err("------------------------------------------\n");
            gvt_err("vgpu%d: likely triggers a gfx reset\n",
                    vgpu->id);
            gvt_err("------------------------------------------\n");
            vgpu->mmio.disable_warn_untrack = true;
        }
    }

    if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, bytes)))
            goto err;
    }

    if (mmio) {
        if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
            if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                goto err;
            if (WARN_ON(mmio->offset != offset))
                goto err;
        }
        ret = mmio->read(vgpu, offset, p_data, bytes);
    } else
        ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

    if (ret)
        goto err;

    intel_gvt_mmio_set_accessed(gvt, offset);
    mutex_unlock(&gvt->lock);
    return 0;
err:
    gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
            vgpu->id, offset, bytes);
    mutex_unlock(&gvt->lock);
    return ret;
}
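
For reference, the alignment tests in this example rely on the stock Linux IS_ALIGNED() helper. Below is a minimal sketch of that macro and of the mask arithmetic it performs on the offsets above (shown purely as an illustration; consult your kernel tree for the authoritative definition):

/* Illustration: the usual in-kernel definition of the helper used above. */
#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)

/*
 * E.g. for the GTT checks with offset = 0x206c:
 *   IS_ALIGNED(0x206c, 4) -> (0x206c & 0x3) == 0   -> true
 *   IS_ALIGNED(0x206c, 8) -> (0x206c & 0x7) == 0x4 -> false
 */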
Example #2

int
main(int argc UNUSED, char** argv UNUSED)
{
  ALIGN(16) int array[2] = {0, 1};
  void* ptr = NULL;
  void* buffer = NULL;
  struct sl_flat_map* map = NULL;
  struct sl_pair pair = { NULL, NULL };
  size_t al = 0;
  size_t i = 0;
  size_t len = 0;
  size_t sz = 0;

  STATIC_ASSERT(!IS_ALIGNED(&array[1], 16), Unexpected_alignment);

  CHECK(sl_create_flat_map(0, 0, 0, 0, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(SZK, 0, 0, 0, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(0, ALK, 0, 0, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(SZK, ALK, 0, 0, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(0, 0, SZD, 0, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(SZK, 0, SZD, 0, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(0, ALK, SZD, 0, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(SZK, ALK, SZD, 0, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(0, 0, 0, ALD, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(SZK, 0, 0, ALD, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(0, ALK, 0, ALD, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(SZK, ALK, 0, ALD, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(0, 0, SZD, ALD, NULL, NULL, NULL), BAD_ARG);
  CHECK(sl_create_flat_map(SZK, 0, SZD, ALD, NULL, NULL, NULL), BAD_ARG);
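
The STATIC_ASSERT near the top of this test holds by simple pointer arithmetic: array starts on a 16-byte boundary, and array[1] sits sizeof(int) == 4 bytes further on, so it cannot also be 16-byte aligned. A standalone C11 sketch of the same check, for illustration only (it uses standard alignas/assert rather than the project's ALIGN and STATIC_ASSERT macros, and assumes a 4-byte int):

#include <assert.h>
#include <stdalign.h>
#include <stdint.h>

int main(void)
{
	alignas(16) int array[2] = {0, 1};

	/* array[0] lies on the requested 16-byte boundary ... */
	assert(((uintptr_t)&array[0] % 16) == 0);
	/* ... so array[1], sizeof(int) bytes later, cannot be 16-byte aligned. */
	assert(((uintptr_t)&array[1] % 16) == sizeof(int));
	return 0;
}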
Example #3
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                                  void *p_data, unsigned int bytes)
{
    struct intel_gvt *gvt = vgpu->gvt;
    struct intel_gvt_mmio_info *mmio;
    unsigned int offset = 0;
    u32 old_vreg = 0, old_sreg = 0;
    int ret = -EINVAL;

    mutex_lock(&gvt->lock);

    if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
        struct intel_vgpu_guest_page *gp;

        gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
        if (gp) {
            ret = gp->handler(gp, pa, p_data, bytes);
            if (ret) {
                gvt_err("vgpu%d: guest page write error %d, "
                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                        vgpu->id, ret,
                        gp->gfn, pa, *(u32 *)p_data, bytes);
            }
            mutex_unlock(&gvt->lock);
            return ret;
        }
    }

    offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

    if (WARN_ON(bytes > 8))
        goto err;

    if (reg_is_gtt(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
            goto err;
        if (WARN_ON(bytes != 4 && bytes != 8))
            goto err;
        if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
            goto err;

        ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
                                                p_data, bytes);
        if (ret)
            goto err;
        mutex_unlock(&gvt->lock);
        return ret;
    }

    if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
        ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
    }

    mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
    if (!mmio && !vgpu->mmio.disable_warn_untrack)
        gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
                vgpu->id, offset, bytes, *(u32 *)p_data);

    if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
        if (WARN_ON(!IS_ALIGNED(offset, bytes)))
            goto err;
    }

    if (mmio) {
        u64 ro_mask = mmio->ro_mask;

        if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
            if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                goto err;
            if (WARN_ON(mmio->offset != offset))
                goto err;
        }

        if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
            old_vreg = vgpu_vreg(vgpu, offset);
            old_sreg = vgpu_sreg(vgpu, offset);
        }

        if (!ro_mask) {
            ret = mmio->write(vgpu, offset, p_data, bytes);
        } else {
            /* Protect RO bits like HW */
            u64 data = 0;

            /* all register bits are RO. */
            if (ro_mask == ~(u64)0) {
                gvt_err("vgpu%d: try to write RO reg %x\n",
                        vgpu->id, offset);
                ret = 0;
                goto out;
            }
            /* keep the RO bits in the virtual register */
            memcpy(&data, p_data, bytes);
            data &= ~mmio->ro_mask;
            data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
            ret = mmio->write(vgpu, offset, &data, bytes);
        }

        /* higher 16bits of mode ctl regs are mask bits for change */
        if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
            u32 mask = vgpu_vreg(vgpu, offset) >> 16;

            vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
                                      | (vgpu_vreg(vgpu, offset) & mask);
            vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
                                      | (vgpu_sreg(vgpu, offset) & mask);
        }
    } else
Example #4
File: boot_sys.c Project: mrd/seL4
BOOT_CODE static paddr_t
load_boot_module(node_id_t node, multiboot_module_t* boot_module, paddr_t load_paddr)
{
    Elf32_Header_t* elf_file = (Elf32_Header_t*)boot_module->start;
    v_region_t v_reg;

    if (!elf32_checkFile(elf_file)) {
        printf("Boot module does not contain a valid ELF32 image\n");
        return 0;
    }

    v_reg = elf32_getMemoryBounds(elf_file);

    if (v_reg.end == 0) {
        printf("ELF32 image in boot module does not contain any segments\n");
        return 0;
    }
    v_reg.end = ROUND_UP(v_reg.end, PAGE_BITS);

    printf("size=0x%x v_entry=0x%x v_start=0x%x v_end=0x%x ",
           v_reg.end - v_reg.start,
           elf_file->e_entry,
           v_reg.start,
           v_reg.end
          );

    if (!IS_ALIGNED(v_reg.start, PAGE_BITS)) {
        printf("Userland image virtual start address must be 4KB-aligned\n");
        return 0;
    }
    if (v_reg.end + 2 * BIT(PAGE_BITS) > PPTR_BASE) {
        /* for IPC buffer frame and bootinfo frame, need 2*4K of additional userland virtual memory */
        printf("Userland image virtual end address too high\n");
        return 0;
    }
    if ((elf_file->e_entry < v_reg.start) || (elf_file->e_entry >= v_reg.end)) {
        printf("Userland image entry point does not lie within userland image\n");
        return 0;
    }

    /* fill ui_info struct */
    glks.ui_info_list[node].pv_offset = load_paddr - v_reg.start;
    glks.ui_info_list[node].p_reg.start = load_paddr;
    load_paddr += v_reg.end - v_reg.start;
    glks.ui_info_list[node].p_reg.end = load_paddr;
    glks.ui_info_list[node].v_entry = elf_file->e_entry;

    printf("p_start=0x%x p_end=0x%x\n",
           glks.ui_info_list[node].p_reg.start,
           glks.ui_info_list[node].p_reg.end
          );

    if (load_paddr > glks.avail_p_reg.end) {
        printf("End of loaded userland image lies outside of usable physical memory\n");
        return 0;
    }

    /* initialise all initial userland memory and load potentially sparse ELF image */
    memzero(
        (void*)glks.ui_info_list[node].p_reg.start,
        glks.ui_info_list[node].p_reg.end - glks.ui_info_list[node].p_reg.start
    );
    elf32_load(elf_file, glks.ui_info_list[node].pv_offset);

    return load_paddr;
}
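
Note that seL4 passes the alignment as a power-of-two exponent (PAGE_BITS, i.e. 12 for 4 KiB pages), whereas the Linux macro shown earlier takes a byte count. Roughly, the seL4 convention looks like the sketch below (illustration only, not the verbatim seL4 headers):

/* Sketch of the exponent-based convention used by seL4 (not verbatim source). */
#define BIT(n)           (1ul << (n))
#define MASK(n)          (BIT(n) - 1ul)
#define IS_ALIGNED(n, b) (!((n) & MASK(b)))

/* IS_ALIGNED(v_reg.start, PAGE_BITS) with PAGE_BITS == 12 therefore asks
 * whether v_reg.start is a multiple of 4096. */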
Example #5
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request	*r = NULL;
	struct usb_request	*ur;
	struct dwc3_trb		*trb;
	struct dwc3_ep		*ep0;
	u32			transferred;
	u32			status;
	u32			length;
	u8			epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

	trb = dwc->ep0_trb;

	trace_dwc3_complete_trb(ep0, trb);

	r = next_request(&ep0->request_list);
	if (!r)
		return;

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		dwc3_trace(trace_dwc3_ep0, "Setup Pending received");

		if (r)
			dwc3_gadget_giveback(ep0, r, -ECONNRESET);

		return;
	}

	ur = &r->request;

	length = trb->size & DWC3_TRB_SIZE_MASK;

	if (dwc->ep0_bounced) {
		unsigned transfer_size = ur->length;
		unsigned maxp = ep0->endpoint.maxpacket;

		transfer_size += (maxp - (transfer_size % maxp));
		transferred = min_t(u32, ur->length,
				transfer_size - length);
		memcpy(ur->buf, dwc->ep0_bounce, transferred);
	} else {
		transferred = ur->length - length;
	}

	ur->actual += transferred;

	if ((epnum & 1) && ur->actual < ur->length) {
		/* for some reason we did not get everything out */

		dwc3_ep0_stall_and_restart(dwc);
	} else {
		dwc3_gadget_giveback(ep0, r, 0);

		if (IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
				ur->length && ur->zero) {
			int ret;

			dwc->ep0_next_event = DWC3_EP0_COMPLETE;

			ret = dwc3_ep0_start_trans(dwc, epnum,
					dwc->ctrl_req_addr, 0,
					DWC3_TRBCTL_CONTROL_DATA);
			WARN_ON(ret < 0);
		}
	}
}
Example #6
File: sl_vector.c Project: vaplv/sl
EXPORT_SYM enum sl_error
sl_vector_insert_n
  (struct sl_vector* vec,
   size_t id,
   size_t count,
   const void* data)
{
  void* buffer = NULL;
  const void* src = NULL;
  void* dst = NULL;
  size_t i = 0;
  enum sl_error err = SL_NO_ERROR;

  if(!vec || (id > vec->length) || !data) {
    err = SL_INVALID_ARGUMENT;
    goto error;
  }
  if(0 == count) {
    goto exit;
  }
  if(!IS_ALIGNED(data, vec->data_alignment)) {
    err = SL_ALIGNMENT_ERROR;
    goto error;
  }
  if(vec->length == SIZE_MAX) {
    err = SL_OVERFLOW_ERROR;
    goto error;
  }
  if(id == vec->length) {
    err = ensure_allocated(vec, vec->length + count, true);
    if(err != SL_NO_ERROR)
      goto error;

    dst = (void*)((uintptr_t)(vec->buffer) + vec->data_size * id);
    src = data;
    for(i = 0; i < count; ++i) {
      dst = memcpy(dst, src, vec->data_size);
      dst = (void*)((uintptr_t)(dst) + vec->data_size);
    }
  } else {
    if(vec->length + count >= vec->capacity) {
      size_t new_capacity = 0;
      NEXT_POWER_OF_2(vec->length + count, new_capacity);

      buffer = MEM_ALIGNED_ALLOC
        (vec->allocator, new_capacity * vec->data_size, vec->data_alignment);
      if(!buffer) {
        err = SL_MEMORY_ERROR;
        goto error;
      }

      /* Copy the vector data ranging from [0, id[ into the new buffer. */
      if(id > 0)
        memcpy(buffer, vec->buffer, vec->data_size * id);

      if(id < vec->length) {
        /* Copy from the vector data [id, length[ to the new buffer
         * [id+count, length + count[. */
        src = (void*)((uintptr_t)(vec->buffer) + vec->data_size * id);
        dst = (void*)((uintptr_t)(buffer) + vec->data_size * (id + count));
        dst = memcpy(dst, src, vec->data_size * (vec->length - id));
      }

      /* Set the src/dst pointer of the data insertion process. */
      dst = (void*)((uintptr_t)(buffer) + vec->data_size * id);
      src = data;
      for(i = 0; i < count; ++i) {
        dst = memcpy(dst, src, vec->data_size);
        dst = (void*)((uintptr_t)(dst) + vec->data_size);
      }

      /* The data to insert may be contained in vec, i.e. free vec->buffer
       * *AFTER* the insertion. */
      if(vec->buffer)
        MEM_FREE(vec->allocator, vec->buffer);

      vec->buffer = buffer;
      vec->capacity = new_capacity;
      buffer = NULL;

    } else {
      if(id < vec->length) {
        src = (void*)((uintptr_t)(vec->buffer) + vec->data_size * id);
        dst = (void*)((uintptr_t)(vec->buffer) + vec->data_size * (id + count));
        dst = memmove(dst, src, vec->data_size * (vec->length - id));
      }

      /* Set the src/dst pointer of the data insertion process. Note that if
       * the data to insert lies in the vector range [id, vec.length[ then it
       * was previously moved by the memmove above; its new address is offset
       * by count * data_size bytes. */
      dst = (void*)((uintptr_t)(vec->buffer) + vec->data_size * id);
      if(IS_MEMORY_OVERLAPPED
         (data,
          vec->data_size,
          (void*)((uintptr_t)(vec->buffer) + vec->data_size * id),
          (vec->length - id) * vec->data_size)) {
        src = (void*)((uintptr_t)data + count * vec->data_size);
      } else {
        src = data;
      }
      for(i = 0; i < count; ++i) {
        dst = memcpy(dst, src, vec->data_size);
        dst = (void*)((uintptr_t)(dst) + vec->data_size);
      }
    }
  }
  vec->length += count;

exit:
  return err;
error:
  if(buffer)
    MEM_FREE(vec->allocator, buffer);
  goto exit;

}
Example #7
int d40_phy_fill_lli(struct d40_phy_lli *lli,
		     dma_addr_t data,
		     u32 data_size,
		     int psize,
		     dma_addr_t next_lli,
		     u32 reg_cfg,
		     bool term_int,
		     u32 data_width,
		     bool is_device)
{
	int num_elems;

	if (psize == STEDMA40_PSIZE_PHY_1)
		num_elems = 1;
	else
		num_elems = 2 << psize;

	/*
	 * Size is 16 bit. data_width is 8, 16, 32 or 64 bit.
	 * Blocks larger than 64 KiB must be split.
	 */
	if (data_size > (0xffff << data_width))
		return -EINVAL;

	/* Must be aligned */
	if (!IS_ALIGNED(data, 0x1 << data_width))
		return -EINVAL;

	/* Transfer size can't be smaller than (num_elems * elem_size) */
	if (data_size < num_elems * (0x1 << data_width))
		return -EINVAL;

	/* The number of elements, i.e. how many chunks */
	lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;

	/*
	 * Distance to next element sized entry.
	 * Usually the size of the element unless you want gaps.
	 */
	if (!is_device)
		lli->reg_elt |= (0x1 << data_width) <<
			D40_SREG_ELEM_PHY_EIDX_POS;

	/* Where the data is */
	lli->reg_ptr = data;
	lli->reg_cfg = reg_cfg;

	/* If this scatter list entry is the last one, no next link */
	if (next_lli == 0)
		lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
	else
		lli->reg_lnk = next_lli;

	/* Set/clear interrupt generation on this link item.*/
	if (term_int)
		lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
	else
		lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);

	/* Post link */
	lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;

	return 0;
}
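
In this driver data_width is the log2 of the element size in bytes, which is why the size limit, the alignment check and the element count all shift by it. A worked example with illustrative values:

/*
 * data_width = 2  ->  element size = 1 << 2 = 4 bytes
 *   alignment check: IS_ALIGNED(data, 4)
 *   size limit:      data_size <= 0xffff << 2   (the element counter is 16 bits)
 *   element count:   data_size >> 2, written into lli->reg_elt
 *
 * With data_width = 0 (byte-wide elements) the 16-bit counter caps a block at
 * 0xffff bytes, which is what the "larger than 64 KiB must be split" comment
 * refers to.
 */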
Example #8
h264enc *h264enc_new(const struct h264enc_params *p)
{
	h264enc *c;
	int i;

	/* check parameter validity */
	if (!IS_ALIGNED(p->src_width, 16) || !IS_ALIGNED(p->src_height, 16) ||
		!IS_ALIGNED(p->width, 2) || !IS_ALIGNED(p->height, 2) ||
		p->width > p->src_width || p->height > p->src_height)
	{
		MSG("invalid picture size");
		return NULL;
	}

	if (p->qp == 0 || p->qp > 47)
	{
		MSG("invalid QP");
		return NULL;
	}

	if (p->src_format != H264_FMT_NV12 && p->src_format != H264_FMT_NV16)
	{
		MSG("invalid color format");
		return NULL;
	}

	/* allocate memory for h264enc structure */
	c = calloc(1, sizeof(*c));
	if (c == NULL)
	{
		MSG("can't allocate h264enc data");
		return NULL;
	}

	/* copy parameters */
	c->mb_width = DIV_ROUND_UP(p->width, 16);
	c->mb_height = DIV_ROUND_UP(p->height, 16);
	c->mb_stride = p->src_width / 16;

	c->crop_right = (c->mb_width * 16 - p->width) / 2;
	c->crop_bottom = (c->mb_height * 16 - p->height) / 2;

	c->profile_idc = p->profile_idc;
	c->level_idc = p->level_idc;

	c->entropy_coding_mode_flag = p->entropy_coding_mode ? 1 : 0;
	c->pic_init_qp = p->qp;
	c->keyframe_interval = p->keyframe_interval;

	c->write_sps_pps = 1;
	c->current_frame_num = 0;

	/* allocate input buffer */
	c->input_color_format = p->src_format;
	switch (c->input_color_format)
	{
	case H264_FMT_NV12:
		c->input_buffer_size = p->src_width * (p->src_height + p->src_height / 2);
		break;
	case H264_FMT_NV16:
		c->input_buffer_size = p->src_width * p->src_height * 2;
		break;
	}

	c->luma_buffer = ve_malloc(c->input_buffer_size);
	if (c->luma_buffer == NULL)
		goto nomem;

	c->chroma_buffer = c->luma_buffer + p->src_width * p->src_height;

	/* allocate bytestream output buffer */
	c->bytestream_buffer_size = 1 * 1024 * 1024;
	c->bytestream_buffer = ve_malloc(c->bytestream_buffer_size);
	if (c->bytestream_buffer == NULL)
		goto nomem;

	/* allocate reference picture memory */
	unsigned int luma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 16, 32);
	unsigned int chroma_size = ALIGN(c->mb_width * 16, 32) * ALIGN(c->mb_height * 8, 32);
	for (i = 0; i < 2; i++)
	{
		c->ref_picture[i].luma_buffer = ve_malloc(luma_size + chroma_size);
		c->ref_picture[i].chroma_buffer = c->ref_picture[i].luma_buffer + luma_size;
		c->ref_picture[i].extra_buffer = ve_malloc(luma_size / 4);
		if (c->ref_picture[i].luma_buffer == NULL || c->ref_picture[i].extra_buffer == NULL)
			goto nomem;
	}

	/* allocate unknown purpose buffers */
	c->extra_buffer_frame = ve_malloc(ALIGN(c->mb_width, 4) * c->mb_height * 8);
	c->extra_buffer_line = ve_malloc(c->mb_width * 32);
	if (c->extra_buffer_frame == NULL || c->extra_buffer_line == NULL)
		goto nomem;

	return c;

nomem:
	MSG("can't allocate VE memory");
	h264enc_free(c);
	return NULL;
}
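
The macroblock and cropping arithmetic is easiest to follow with concrete numbers. A worked example with hypothetical parameters (src_width 1280, src_height 720, width 1270, height 714), purely for illustration:

/*
 * mb_width  = DIV_ROUND_UP(1270, 16) = 80
 * mb_height = DIV_ROUND_UP(714, 16)  = 45
 * mb_stride = 1280 / 16              = 80
 *
 * crop_right  = (80 * 16 - 1270) / 2 = (1280 - 1270) / 2 = 5
 * crop_bottom = (45 * 16 -  714) / 2 = ( 720 -  714) / 2 = 3
 *
 * NV12 input buffer: 1280 * (720 + 720 / 2) = 1,382,400 bytes
 */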
Example #9
void __init s5p_cma_region_reserve(struct cma_region *regions_normal,
				      struct cma_region *regions_secure,
				      size_t align_secure, const char *map)
{
	struct cma_region *reg;
	phys_addr_t paddr_last = 0xFFFFFFFF;

	for (reg = regions_normal; reg->size != 0; reg++) {
		phys_addr_t paddr;

		if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
			pr_debug("S5P/CMA: size of '%s' is NOT page-aligned\n",
								reg->name);
			reg->size = PAGE_ALIGN(reg->size);
		}


		if (reg->reserved) {
			pr_err("S5P/CMA: '%s' already reserved\n", reg->name);
			continue;
		}

		if (reg->alignment) {
			if ((reg->alignment & ~PAGE_MASK) ||
				(reg->alignment & (reg->alignment - 1))) {
				pr_err("S5P/CMA: Failed to reserve '%s': "
						"incorrect alignment 0x%08x.\n",
						reg->name, reg->alignment);
				continue;
			}
		} else {
			reg->alignment = PAGE_SIZE;
		}

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && (memblock_reserve(reg->start, reg->size) == 0))
				reg->reserved = 1;
			else {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
				       reg->name);
				continue;
			}

			pr_debug("S5P/CMA: "
				 "Reserved 0x%08x/0x%08x for '%s'\n",
				 reg->start, reg->size, reg->name);
			paddr = reg->start;
		} else {
			paddr = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					reg->size, reg->alignment);
		}

		if (paddr) {
			if (memblock_reserve(paddr, reg->size)) {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
								reg->name);
				continue;
			}

			reg->start = paddr;
			reg->reserved = 1;

			pr_info("S5P/CMA: Reserved 0x%08x/0x%08x for '%s'\n",
						reg->start, reg->size, reg->name);
		} else {
			pr_err("S5P/CMA: No free space in memory for '%s'\n",
								reg->name);
		}

		if (cma_early_region_register(reg)) {
			pr_err("S5P/CMA: Failed to register '%s'\n",
								reg->name);
			memblock_free(reg->start, reg->size);
		} else {
			paddr_last = min(paddr, paddr_last);
		}
	}

	if (align_secure & (align_secure - 1)) {
		pr_err("S5P/CMA: "
			"Wrong alignment requirement for secure region.\n");
	} else if (regions_secure && regions_secure->size) {
		size_t size_secure = 0;

		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		/* Entire secure regions will be merged into 2
		 * consecutive regions. */
		if (align_secure == 0) {
			size_t size_region2;
			size_t order_region2;
			size_t aug_size;

			align_secure = 1 <<
				(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
			/* Calculation of a subregion size */
			size_region2 = size_secure - align_secure;
			order_region2 = get_order(size_region2) + PAGE_SHIFT;
			if (order_region2 < 20)
				order_region2 = 20; /* 1MB */
			order_region2 -= 3; /* divide by 8 */
			size_region2 = ALIGN(size_region2, 1 << order_region2);

			aug_size = align_secure + size_region2 - size_secure;
			if (aug_size > 0) {
				reg->size += aug_size;
				size_secure += aug_size;
				pr_debug("S5P/CMA: "
					"Augmented size of '%s' by %#x B.\n",
					reg->name, aug_size);
			}
		} else
			size_secure = ALIGN(size_secure, align_secure);

		pr_info("S5P/CMA: "
			"Reserving %#x for secure region aligned by %#x.\n",
						size_secure, align_secure);

		if (paddr_last >= memblock.current_limit) {
			paddr_last = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					size_secure, reg->alignment);
		} else {
			paddr_last -= size_secure;
			paddr_last = round_down(paddr_last, align_secure);
		}

		if (paddr_last) {
#ifndef CONFIG_DMA_CMA
			while (memblock_reserve(paddr_last, size_secure))
				paddr_last -= align_secure;
#else
			if (!reg->start) {
				while (memblock_reserve(paddr_last,
							size_secure))
					paddr_last -= align_secure;
			}
#endif

			do {
#ifndef CONFIG_DMA_CMA
				reg->start = paddr_last;
				reg->reserved = 1;
				paddr_last += reg->size;
#else
				if (reg->start) {
					reg->reserved = 1;
#ifdef CONFIG_USE_MFC_CMA
#if defined(CONFIG_MACH_M0)
					if (reg->start == 0x5C100000) {
						if (memblock_reserve(0x5C100000,
								0x700000))
							panic("memblock\n");
						if (memblock_reserve(0x5F000000,
								0x200000))
							panic("memblock\n");
					} else
#elif defined(CONFIG_MACH_GC1)
					if (reg->start == 0x50400000) {
						if (memblock_reserve(0x50400000,
								0x400000))
							panic("memblock\n");
						if (memblock_reserve(0x53000000,
								0x500000))
							panic("memblock\n");
					} else
#endif
					{
						if (memblock_reserve(reg->start,
								reg->size))
							panic("memblock\n");
					}
#else
					if (memblock_reserve(reg->start,
								reg->size))
						panic("memblock\n");
#endif
				} else {
					reg->start = paddr_last;
					reg->reserved = 1;
					paddr_last += reg->size;
				}
#endif
				pr_info("S5P/CMA: "
					"Reserved 0x%08x/0x%08x for '%s'\n",
					reg->start, reg->size, reg->name);
				if (cma_early_region_register(reg)) {
					memblock_free(reg->start, reg->size);
					pr_err("S5P/CMA: "
					"Failed to register secure region "
					"'%s'\n", reg->name);
				} else {
					size_secure -= reg->size;
				}
			} while (reg-- != regions_secure);

			if (size_secure > 0)
				memblock_free(paddr_last, size_secure);
		} else {
			pr_err("S5P/CMA: Failed to reserve secure regions\n");
		}
	}

	if (map)
		cma_set_defaults(NULL, map);
}
Example #10
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
Example #11
File: vspace.c Project: aoom/seL4
static inline bool_t
checkVPAlignment(vm_page_size_t sz, word_t w)
{
    return IS_ALIGNED(w, pageBitsForSize(sz));
}
Example #12
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
    struct device *dev = htt->ar->dev;
    struct htt_cmd *cmd;
    struct htt_data_tx_desc_frag *tx_frags;
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
    struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
    struct sk_buff *txdesc = NULL;
    bool use_frags;
    u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
    u8 tid;
    int prefetch_len, desc_len;
    int msdu_id = -1;
    int res;
    u8 flags0;
    u16 flags1;

    res = ath10k_htt_tx_inc_pending(htt);
    if (res)
        goto err;

    spin_lock_bh(&htt->tx_lock);
    res = ath10k_htt_tx_alloc_msdu_id(htt);
    if (res < 0) {
        spin_unlock_bh(&htt->tx_lock);
        goto err_tx_dec;
    }
    msdu_id = res;
    htt->pending_tx[msdu_id] = msdu;
    spin_unlock_bh(&htt->tx_lock);

    prefetch_len = min(htt->prefetch_len, msdu->len);
    prefetch_len = roundup(prefetch_len, 4);

    desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

    txdesc = ath10k_htc_alloc_skb(desc_len);
    if (!txdesc) {
        res = -ENOMEM;
        goto err_free_msdu_id;
    }

    /* Since HTT 3.0 there is no separate mgmt tx command. However, in the case
     * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
     * fragment list, the host driver specifies the frame pointer directly. */
    use_frags = htt->target_version_major < 3 ||
                !ieee80211_is_mgmt(hdr->frame_control);

    if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
        ath10k_warn("htt alignment check failed. dropping packet.\n");
        res = -EIO;
        goto err_free_txdesc;
    }

    if (use_frags) {
        skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
        skb_cb->htt.pad_len = (unsigned long)msdu->data -
                              round_down((unsigned long)msdu->data, 4);

        skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
    } else {
        skb_cb->htt.frag_len = 0;
        skb_cb->htt.pad_len = 0;
    }

    res = ath10k_skb_map(dev, msdu);
    if (res)
        goto err_pull_txfrag;

    if (use_frags) {
        dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
                                DMA_TO_DEVICE);

        /* tx fragment list must be terminated with zero-entry */
        tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
        tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
                                          skb_cb->htt.frag_len +
                                          skb_cb->htt.pad_len);
        tx_frags[0].len   = __cpu_to_le32(msdu->len -
                                          skb_cb->htt.frag_len -
                                          skb_cb->htt.pad_len);
        tx_frags[1].paddr = __cpu_to_le32(0);
        tx_frags[1].len   = __cpu_to_le32(0);

        dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
                                   DMA_TO_DEVICE);
    }

    ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
               (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
    ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
                    msdu->data, msdu->len);

    skb_put(txdesc, desc_len);
    cmd = (struct htt_cmd *)txdesc->data;

    tid = ATH10K_SKB_CB(msdu)->htt.tid;

    ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

    flags0  = 0;
    if (!ieee80211_has_protected(hdr->frame_control))
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
    flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

    if (use_frags)
        flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
    else
        flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

    flags1  = 0;
    flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
    flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
    flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
    flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

    cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
    cmd->data_tx.flags0      = flags0;
    cmd->data_tx.flags1      = __cpu_to_le16(flags1);
    cmd->data_tx.len         = __cpu_to_le16(msdu->len -
                               skb_cb->htt.frag_len -
                               skb_cb->htt.pad_len);
    cmd->data_tx.id          = __cpu_to_le16(msdu_id);
    cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
    cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);

    memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

    res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
    if (res)
        goto err_unmap_msdu;

    return 0;

err_unmap_msdu:
    ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
    skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
    dev_kfree_skb_any(txdesc);
err_free_msdu_id:
    spin_lock_bh(&htt->tx_lock);
    htt->pending_tx[msdu_id] = NULL;
    ath10k_htt_tx_free_msdu_id(htt, msdu_id);
    spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
    ath10k_htt_tx_dec_pending(htt);
err:
    return res;
}
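
The pad_len computation measures how far msdu->data sits past the previous 4-byte boundary, so that after skb_push() the fragment list lands 4-byte aligned in front of the payload. For illustration (hypothetical pointer value):

/*
 * msdu->data = 0x...c06e
 *   round_down(0x...c06e, 4) = 0x...c06c
 *   pad_len = 0x...c06e - 0x...c06c = 2
 *
 * skb_push(msdu, frag_len + 2) then starts the pushed region at
 * 0x...c06e - frag_len - 2, which is back on a 4-byte boundary as long as
 * frag_len itself is a multiple of 4.
 */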
Example #13
/*
 * Check that the two inodes are eligible for cloning, the ranges make
 * sense, and then flush all dirty data.  Caller must ensure that the
 * inodes have been locked against any other modifications.
 *
 * Returns: 0 for "nothing to clone", 1 for "something to clone", or
 * the usual negative error code.
 */
int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
			       struct inode *inode_out, loff_t pos_out,
			       u64 *len, bool is_dedupe)
{
	loff_t bs = inode_out->i_sb->s_blocksize;
	loff_t blen;
	loff_t isize;
	bool same_inode = (inode_in == inode_out);
	int ret;

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode_out))
		return -EPERM;

	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
		return -ETXTBSY;

	/* Don't reflink dirs, pipes, sockets... */
	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	/* Are we going all the way to the end? */
	isize = i_size_read(inode_in);
	if (isize == 0)
		return 0;

	/* Zero length dedupe exits immediately; reflink goes to EOF. */
	if (*len == 0) {
		if (is_dedupe || pos_in == isize)
			return 0;
		if (pos_in > isize)
			return -EINVAL;
		*len = isize - pos_in;
	}

	/* Ensure offsets don't wrap and the input is inside i_size */
	if (pos_in + *len < pos_in || pos_out + *len < pos_out ||
	    pos_in + *len > isize)
		return -EINVAL;

	/* Don't allow dedupe past EOF in the dest file */
	if (is_dedupe) {
		loff_t	disize;

		disize = i_size_read(inode_out);
		if (pos_out >= disize || pos_out + *len > disize)
			return -EINVAL;
	}

	/* If we're linking to EOF, continue to the block boundary. */
	if (pos_in + *len == isize)
		blen = ALIGN(isize, bs) - pos_in;
	else
		blen = *len;

	/* Only reflink if we're aligned to block boundaries */
	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
	    !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
		return -EINVAL;

	/* Don't allow overlapped reflink within the same file */
	if (same_inode) {
		if (pos_out + blen > pos_in && pos_out < pos_in + blen)
			return -EINVAL;
	}

	/* Wait for the completion of any pending IOs on both files */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	ret = filemap_write_and_wait_range(inode_in->i_mapping,
			pos_in, pos_in + *len - 1);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(inode_out->i_mapping,
			pos_out, pos_out + *len - 1);
	if (ret)
		return ret;

	/*
	 * Check that the extents are the same.
	 */
	if (is_dedupe) {
		bool		is_same = false;

		ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
				inode_out, pos_out, *len, &is_same);
		if (ret)
			return ret;
		if (!is_same)
			return -EBADE;
	}

	return 1;
}
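
The subtle part is the EOF special case: when the clone reaches end of file, blen is rounded up to the block size before the alignment test, so a file whose size is not block-aligned can still be reflinked in full. A worked example with illustrative values:

/*
 * bs = 4096, pos_in = pos_out = 0, isize = 10000, *len = 10000 (clone to EOF)
 *
 *   pos_in + *len == isize  ->  blen = ALIGN(10000, 4096) - 0 = 12288
 *   IS_ALIGNED(0, 4096)         -> true
 *   IS_ALIGNED(0 + 12288, 4096) -> true      => request allowed
 *
 * A mid-file request keeps blen = *len, so the offsets and the length must all
 * be block-aligned on their own.
 */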
Example #14
int crypto_run(struct fcrypt *fcr, struct kernel_crypt_op *kcop)
{
	struct csession *ses_ptr;
	struct crypt_op *cop = &kcop->cop;
	int ret;

	if (unlikely(cop->op != COP_ENCRYPT && cop->op != COP_DECRYPT)) {
		ddebug(1, "invalid operation op=%u", cop->op);
		return -EINVAL;
	}

	/* this also enters ses_ptr->sem */
	ses_ptr = crypto_get_session_by_sid(fcr, cop->ses);
	if (unlikely(!ses_ptr)) {
		derr(1, "invalid session ID=0x%08X", cop->ses);
		return -EINVAL;
	}

	if (ses_ptr->hdata.init != 0 && (cop->flags == 0 || cop->flags & COP_FLAG_RESET)) {
		ret = cryptodev_hash_reset(&ses_ptr->hdata);
		if (unlikely(ret)) {
			derr(1, "error in cryptodev_hash_reset()");
			goto out_unlock;
		}
	}

	if (ses_ptr->cdata.init != 0) {
		int blocksize = ses_ptr->cdata.blocksize;

		if (unlikely(cop->len % blocksize)) {
			derr(1, "data size (%u) isn't a multiple of block size (%u)",
				cop->len, blocksize);
			ret = -EINVAL;
			goto out_unlock;
		}

		cryptodev_cipher_set_iv(&ses_ptr->cdata, kcop->iv,
				min(ses_ptr->cdata.ivsize, kcop->ivlen));
	}

	if (likely(cop->len)) {
		if (cop->flags & COP_FLAG_NO_ZC) {
			if (unlikely(ses_ptr->alignmask && !IS_ALIGNED((unsigned long)cop->src, ses_ptr->alignmask))) {
				dwarning(2, "source address %p is not %d byte aligned - disabling zero copy",
						cop->src, ses_ptr->alignmask + 1);
				cop->flags &= ~COP_FLAG_NO_ZC;
			}

			if (unlikely(ses_ptr->alignmask && !IS_ALIGNED((unsigned long)cop->dst, ses_ptr->alignmask))) {
				dwarning(2, "destination address %p is not %d byte aligned - disabling zero copy",
						cop->dst, ses_ptr->alignmask + 1);
				cop->flags &= ~COP_FLAG_NO_ZC;
			}
		}

		if (cop->flags & COP_FLAG_NO_ZC)
			ret = __crypto_run_std(ses_ptr, &kcop->cop);
		else
			ret = __crypto_run_zc(ses_ptr, kcop);
		if (unlikely(ret))
			goto out_unlock;
	}

	if (ses_ptr->cdata.init != 0) {
		cryptodev_cipher_get_iv(&ses_ptr->cdata, kcop->iv,
				min(ses_ptr->cdata.ivsize, kcop->ivlen));
	}

	if (ses_ptr->hdata.init != 0 &&
		((cop->flags & COP_FLAG_FINAL) ||
		   (!(cop->flags & COP_FLAG_UPDATE) || cop->len == 0))) {

		ret = cryptodev_hash_final(&ses_ptr->hdata, kcop->hash_output);
		if (unlikely(ret)) {
			derr(0, "CryptoAPI failure: %d", ret);
			goto out_unlock;
		}
		kcop->digestsize = ses_ptr->hdata.digestsize;
	}

	if (ses_ptr->rdata.init != 0 && cop->len > 0) {
		kcop->rng_output = kmalloc(cop->len, GFP_KERNEL);
		if (unlikely(!kcop->rng_output)) {
			derr(0, "Not enough space to store %d random bytes.", cop->len);
			ret = -ENOMEM;
			goto out_unlock;
		}

		ret = cryptodev_rng_get_bytes(&ses_ptr->rdata, kcop->rng_output, cop->len);
		// some RNGs return 0 for success, while
		// some return the number of bytes generated
		if (unlikely(ret != 0 && ret != cop->len)) {
			derr(0, "RNG failure: %d", ret);
			kfree(kcop->rng_output); kcop->rng_output = NULL;
			goto out_unlock;
		}

		ret = 0;
		kcop->rnglen = cop->len;
	}

out_unlock:
	crypto_put_session(ses_ptr);
	return ret;
}
Example #15
void
crypto_send_data(void *ctx_ptr, unsigned char *data_ptr,
		 unsigned int buff_size, unsigned int bytes_to_write,
		 unsigned int *ret_status)
{
	crypto_SHA1_ctx *sha1_ctx = (crypto_SHA1_ctx *) ctx_ptr;
	unsigned int bytes_left = 0;
	unsigned int i = 0;
	unsigned int ce_status = 0;
	unsigned int ce_err_bmsk = 0;
	unsigned int is_not_aligned = FALSE;
	unsigned char data[4];
	unsigned char *buff_ptr = data_ptr;

	/* Check if the buff_ptr is aligned */
	if (!(IS_ALIGNED(buff_ptr))) {
		is_not_aligned = TRUE;
	}

	/* Fill the saved_buff with data from buff_ptr. First we have to write
	   all the data from the saved_buff and then we will write data from
	   buff_ptr. We will update bytes_left and buff_ptr in the while loop
	   once we are done writing all the data from saved_buff. */

	if (sha1_ctx->saved_buff_indx != 0) {
		memcpy(sha1_ctx->saved_buff + sha1_ctx->saved_buff_indx,
		       buff_ptr,
		       (((buff_size + sha1_ctx->saved_buff_indx) <=
			 CRYPTO_SHA_BLOCK_SIZE)
			? buff_size : (CRYPTO_SHA_BLOCK_SIZE -
				       sha1_ctx->saved_buff_indx)));

		if (bytes_to_write >= CRYPTO_SHA_BLOCK_SIZE) {
			bytes_left = CRYPTO_SHA_BLOCK_SIZE;
		} else {
			bytes_left = bytes_to_write;
		}
	} else {
		bytes_left = bytes_to_write;
	}

	/* Error bitmask to check crypto engine status */
	ce_err_bmsk = (SW_ERR | DIN_RDY | DIN_SIZE_AVAIL);

	while (bytes_left >= 4) {
		ce_status = rd_ce(CRYPTO3_STATUS);
		ce_status &= ce_err_bmsk;

		if (ce_status & SW_ERR) {
			/* If there is SW_ERR, reset the engine */
			crypto_eng_reset();
			*ret_status = CRYPTO_ERR_FAIL;
			dprintf(CRITICAL, "crypto_send_data sw error\n");
			return;
		}

		/* We can write data now - 4 bytes at a time in network byte order */
		if ((ce_status & DIN_RDY)
		    && ((ce_status & DIN_SIZE_AVAIL) >= 4)) {
			if (sha1_ctx->saved_buff_indx != 0) {
				/* Write from saved_buff */
				wr_ce(htonl
				      (*
				       ((unsigned int *)(sha1_ctx->saved_buff) +
					i)), CRYPTO3_DATA_IN);
			} else {
				if (!is_not_aligned) {
					/* Write from buff_ptr aligned */
					wr_ce(htonl
					      (*((unsigned int *)buff_ptr + i)),
					      CRYPTO3_DATA_IN);
				} else {
					/* If buff_ptr is not aligned write byte by byte */
					data[0] = *(buff_ptr + i);
					data[1] = *(buff_ptr + i + 1);
					data[2] = *(buff_ptr + i + 2);
					data[3] = *(buff_ptr + i + 3);
					/* i will be incremented by 1 outside this block */
					i += 3;
					wr_ce(htonl(*(unsigned int *)data),
					      CRYPTO3_DATA_IN);
					memset(data, 0, 4);
				}
			}
			i++;
			bytes_left -= 4;

			/* Check if we have written from saved_buff. Adjust buff_ptr and
			   bytes_left accordingly */
			if ((sha1_ctx->saved_buff_indx != 0)
			    && (bytes_left == 0)
			    && (bytes_to_write > CRYPTO_SHA_BLOCK_SIZE)) {
				bytes_left =
				    (bytes_to_write - CRYPTO_SHA_BLOCK_SIZE);
				buff_ptr =
				    (unsigned char *)((unsigned char *)data_ptr
						      + CRYPTO_SHA_BLOCK_SIZE -
						      sha1_ctx->
						      saved_buff_indx);
				i = 0;
				sha1_ctx->saved_buff_indx = 0;
				if (!(IS_ALIGNED(buff_ptr))) {
					is_not_aligned = TRUE;
				}
			}
		}
	}

	/* We might have bytes_left < 4. Write them now if available */
	if (bytes_left) {
		memset(data, 0, sizeof(unsigned int));

		if (sha1_ctx->saved_buff_indx)
			buff_ptr = (sha1_ctx->saved_buff + bytes_to_write - 1);
		else
			buff_ptr =
			    (((unsigned char *)data_ptr) + buff_size - 1);

		for (i = 0; i < bytes_left; i++) {
			data[3 - i] = *(buff_ptr - bytes_left + i + 1);
		}

		ce_status = rd_ce(CRYPTO3_STATUS);
		ce_status &= ce_err_bmsk;

		if (ce_status & SW_ERR) {
			crypto_eng_reset();
			*ret_status = CRYPTO_ERR_FAIL;
			dprintf(CRITICAL, "crypto_send_data sw error 2\n");
			return;
		}
		if ((ce_status & DIN_RDY)
		    && ((ce_status & DIN_SIZE_AVAIL) >= 4)) {
			wr_ce(*(unsigned int *)data, CRYPTO3_DATA_IN);
		}
	}
	*ret_status = CRYPTO_ERR_NONE;
	return;
}
Example #16
/*!
 * Memory copy using HW engines
 *  
 *  reserved [unused]
 *  pDmaInputBuffer [in] -A structure which represents the DMA input buffer.
 *  pDmaOutputBuffer [in/out] -A structure which represents the DMA output buffer.
 * 
 * \return int One of DX_SYM_* error codes defined in dx_error.h.
 */
int ProcessBypass(struct sep_ctx_generic *reserved, DmaBuffer_s *pDmaInputBuffer, DmaBuffer_s *pDmaOutputBuffer)
{
	Bypass_t dmaTypeIn, dmaTypeOut;
	int drvRc = DX_RET_OK;

	dmaTypeIn = GetBypassType(pDmaInputBuffer->dmaBufType, pDmaInputBuffer->pData);
	dmaTypeOut = GetBypassType(pDmaOutputBuffer->dmaBufType, pDmaOutputBuffer->pData);
	
	if ((dmaTypeIn == BYPASS_MAX) || (dmaTypeOut == BYPASS_MAX)) {
		DX_PAL_LOG_ERR("Invalid din/dout memory type\n");
		drvRc = DX_RET_INVARG;
		goto EndWithErr;
	}

	switch (dmaTypeIn) {
	case BYPASS_SRAM:
		switch (dmaTypeOut) {
		case BYPASS_DLLI:
			if (IS_ALIGNED(pDmaInputBuffer->pData, sizeof(uint32_t)) ||
			    IS_MULT(pDmaInputBuffer->size, sizeof(uint32_t))) { 
				DescBypass(
					DMA_SRAM, 
					pDmaInputBuffer->pData, 
					pDmaInputBuffer->size,
					pDmaInputBuffer->axiNs,
					DMA_DLLI, 
					pDmaOutputBuffer->pData,
					pDmaOutputBuffer->size,
					pDmaOutputBuffer->axiNs);
			} else {
				DX_PAL_LOG_ERR("Bad address or bad size. SRAM to DLLI copy -Input address %xl with %ul B\n",
					pDmaInputBuffer->pData, pDmaInputBuffer->size);
				drvRc = DX_RET_INVARG;
				goto EndWithErr;
			}
			break;
		default:
			DX_PAL_LOG_ERR("Invalid BYPASS mode\n");
			drvRc = DX_RET_UNSUPP_ALG_MODE;
			goto EndWithErr;
		}
		break;
	case BYPASS_DLLI:
		switch (dmaTypeOut) {
		case BYPASS_SRAM:
			if (IS_ALIGNED(pDmaInputBuffer->pData, sizeof(uint32_t)) ||
			    IS_MULT(pDmaInputBuffer->size, sizeof(uint32_t))) { 
				DescBypass(
					DMA_DLLI, 
					pDmaInputBuffer->pData, 
					pDmaInputBuffer->size,
					pDmaInputBuffer->axiNs,
					DMA_SRAM, 
					pDmaOutputBuffer->pData,
					pDmaOutputBuffer->size,
					pDmaOutputBuffer->axiNs);
			} else {
				DX_PAL_LOG_ERR("Bad address or bad size. SRAM to DLLI copy -Input address %xl with %ul B\n",
					pDmaInputBuffer->pData, pDmaInputBuffer->size);
				drvRc = DX_RET_INVARG;
				goto EndWithErr;
			}
			break;
		case BYPASS_DLLI:
			DescBypass(
				    DMA_BUF_TYPE_TO_MODE(pDmaInputBuffer->dmaBufType), 
				    pDmaInputBuffer->pData, 
				    pDmaInputBuffer->size,
				    pDmaInputBuffer->axiNs,
				    DMA_BUF_TYPE_TO_MODE(pDmaOutputBuffer->dmaBufType), 
				    pDmaOutputBuffer->pData,
				    pDmaOutputBuffer->size,
				    pDmaOutputBuffer->axiNs);
			break;
		default:
			DX_PAL_LOG_ERR("Invalid BYPASS mode\n");
			drvRc = DX_RET_UNSUPP_ALG_MODE;
			goto EndWithErr;
		}
		break;
	default:
		DX_PAL_LOG_ERR("Invalid BYPASS mode\n");
		drvRc = DX_RET_UNSUPP_ALG_MODE;
		break;
	}

EndWithErr:
	return drvRc;
}
Example #17
/*
 * msm_rpm_log_copy() - Copies messages from a volatile circular buffer in
 *			the RPM's shared memory into a private local buffer
 * msg_buffer:		pointer to local buffer (string)
 * buf_len:		length of local buffer in bytes
 * read_start_idx:	index into shared memory buffer
 *
 * Return value:	number of bytes written to the local buffer
 *
 * Copies messages stored in a circular buffer in the RPM Message Memory into
 * a specified local buffer.  The RPM processor is unaware of these reading
 * efforts, so care is taken to make sure that messages are valid both before
 * and after reading.  The RPM processor utilizes a ULog driver to write the
 * log.  The RPM processor maintains tail and head indices.  These correspond
 * to the next byte to write into, and the first valid byte, respectively.
 * Both indices increase monotonically (except for rollover).
 *
 * Messages take the form of [(u32)length] [(char)data0,1,...] in which the
 * length specifies the number of payload bytes.  Messages must be 4 byte
 * aligned, so padding is added at the end of a message as needed.
 *
 * Print format:
 * - 0xXX, 0xXX, 0xXX
 * - 0xXX
 * etc...
 */
static u32 msm_rpm_log_copy(const struct msm_rpm_log_platform_data *pdata,
                            char *msg_buffer, u32 buf_len, u32 *read_idx)
{
    u32 head_idx, tail_idx;
    u32 pos = 0;
    u32 i = 0;
    u32 msg_len;
    u32 pos_start;
    char temp[4];

    tail_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
                                MSM_RPM_LOG_TAIL);
    head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
                                MSM_RPM_LOG_HEAD);

    /* loop while the remote buffer has valid messages left to read */
    while (tail_idx - head_idx > 0 && tail_idx - *read_idx > 0) {
        head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
                                    MSM_RPM_LOG_HEAD);
        /* check if the message to be read is valid */
        if (tail_idx - *read_idx > tail_idx - head_idx) {
            *read_idx = head_idx;
            continue;
        }
        /*
         * Ensure that the reported buffer size is within limits of
         * known maximum size and that all indices are 4 byte aligned.
         * These conditions are required to interact with a ULog buffer
         * properly.
         */
        if (tail_idx - head_idx > pdata->log_len ||
                !IS_ALIGNED((tail_idx | head_idx | *read_idx), 4))
            break;

        msg_len = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_BUFFER,
                                   (*read_idx >> 2) & pdata->log_len_mask);

        /* handle messages that claim to be longer than the log */
        if (PADDED_LENGTH(msg_len) > tail_idx - *read_idx - 4)
            msg_len = tail_idx - *read_idx - 4;

        /* check that the local buffer has enough space for this msg */
        if (pos + PRINTED_LENGTH(msg_len) > buf_len)
            break;

        pos_start = pos;
        pos += scnprintf(msg_buffer + pos, buf_len - pos, "- ");

        /* copy message payload to local buffer */
        for (i = 0; i < msg_len; i++) {
            /* read from shared memory 4 bytes at a time */
            if (IS_ALIGNED(i, 4))
                *((u32 *)temp) = msm_rpm_log_read(pdata,
                                                  MSM_RPM_LOG_PAGE_BUFFER,
                                                  ((*read_idx + 4 + i) >> 2) &
                                                  pdata->log_len_mask);

            pos += scnprintf(msg_buffer + pos, buf_len - pos,
                             "0x%02X, ", temp[i & 0x03]);
        }

        pos += scnprintf(msg_buffer + pos, buf_len - pos, "\n");

        head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
                                    MSM_RPM_LOG_HEAD);

        /* roll back if message that was read is not still valid */
        if (tail_idx - *read_idx > tail_idx - head_idx)
            pos = pos_start;

        *read_idx += PADDED_LENGTH(msg_len) + 4;
    }

    return pos;
}
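
PADDED_LENGTH() and PRINTED_LENGTH() are driver-local helpers whose definitions are not shown here. Plausible shapes, consistent with how the loop uses them (payload padded to 4 bytes; each byte printed as "0xXX, " plus the "- " prefix and trailing newline), stated only as an assumption:

/* Assumed shapes of the helpers -- not the driver's verbatim definitions. */
#define PADDED_LENGTH(x)  ALIGN((x), 4)       /* payload rounded up to 4 bytes   */
#define PRINTED_LENGTH(x) ((x) * 6 + 3)       /* "0xXX, " per byte + "- " + "\n" */

/* A 5-byte message would then advance *read_idx by 4 (length word) + 8 (padded
 * payload) and need 5 * 6 + 3 = 33 bytes of room in the local buffer. */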
Example #18
static GstBufferPool *
gst_msdkdec_create_buffer_pool (GstMsdkDec * thiz, GstVideoInfo * info,
    guint num_buffers)
{
  GstBufferPool *pool = NULL;
  GstStructure *config;
  GstAllocator *allocator = NULL;
  GstVideoAlignment align;
  GstCaps *caps = NULL;
  GstAllocationParams params = { 0, 31, 0, 0, };
  mfxFrameAllocResponse *alloc_resp = NULL;

  g_return_val_if_fail (info, NULL);
  g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (info)
      && GST_VIDEO_INFO_HEIGHT (info), NULL);

  alloc_resp = &thiz->alloc_resp;

  pool = gst_msdk_buffer_pool_new (thiz->context, alloc_resp);
  if (!pool)
    goto error_no_pool;

  if (G_UNLIKELY (!IS_ALIGNED (GST_VIDEO_INFO_WIDTH (info), 16)
          || !IS_ALIGNED (GST_VIDEO_INFO_HEIGHT (info), 32))) {
    gst_msdk_set_video_alignment (info, &align);
    gst_video_info_align (info, &align);
  }

  caps = gst_video_info_to_caps (info);

  /* allocators should use the same width/height/stride/height_alignment of
   * negotiated output caps which is what we configure in msdk_allocator */
  if (thiz->use_dmabuf)
    allocator = gst_msdk_dmabuf_allocator_new (thiz->context, info, alloc_resp);
  else if (thiz->use_video_memory)
    allocator = gst_msdk_video_allocator_new (thiz->context, info, alloc_resp);
  else
    allocator = gst_msdk_system_allocator_new (info);

  if (!allocator)
    goto error_no_allocator;

  config = gst_buffer_pool_get_config (GST_BUFFER_POOL_CAST (pool));
  gst_buffer_pool_config_set_params (config, caps,
      GST_VIDEO_INFO_SIZE (info), num_buffers, 0);
  gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META);
  gst_buffer_pool_config_add_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);

  if (thiz->use_video_memory) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_MSDK_USE_VIDEO_MEMORY);
    if (thiz->use_dmabuf)
      gst_buffer_pool_config_add_option (config,
          GST_BUFFER_POOL_OPTION_MSDK_USE_DMABUF);
  }

  gst_buffer_pool_config_set_video_alignment (config, &align);
  gst_buffer_pool_config_set_allocator (config, allocator, &params);
  gst_object_unref (allocator);

  if (!gst_buffer_pool_set_config (pool, config))
    goto error_pool_config;

  return pool;

error_no_pool:
  {
    GST_INFO_OBJECT (thiz, "failed to create bufferpool");
    return NULL;
  }
error_no_allocator:
  {
    GST_INFO_OBJECT (thiz, "failed to create allocator");
    gst_object_unref (pool);
    return NULL;
  }
error_pool_config:
  {
    GST_INFO_OBJECT (thiz, "failed to set config");
    gst_object_unref (pool);
    gst_object_unref (allocator);
    return NULL;
  }
}
Example #20
static int btrfs_check_super(struct btrfs_super_block *sb)
{
	int ret = 0;

	if (sb->flags & ~BTRFS_SUPER_FLAG_SUPP) {
		printf("%s: Unsupported flags: %llu\n", __func__,
		       sb->flags & ~BTRFS_SUPER_FLAG_SUPP);
	}

	if (sb->root_level > BTRFS_MAX_LEVEL) {
		printf("%s: tree_root level too big: %d >= %d\n", __func__,
		       sb->root_level, BTRFS_MAX_LEVEL);
		ret = -1;
	}

	if (sb->chunk_root_level > BTRFS_MAX_LEVEL) {
		printf("%s: chunk_root level too big: %d >= %d\n", __func__,
		       sb->chunk_root_level, BTRFS_MAX_LEVEL);
		ret = -1;
	}

	if (sb->log_root_level > BTRFS_MAX_LEVEL) {
		printf("%s: log_root level too big: %d >= %d\n", __func__,
		       sb->log_root_level, BTRFS_MAX_LEVEL);
		ret = -1;
	}

	if (!is_power_of_2(sb->sectorsize) || sb->sectorsize < 4096 ||
	    sb->sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		printf("%s: invalid sectorsize %u\n", __func__,
		       sb->sectorsize);
		ret = -1;
	}

	if (!is_power_of_2(sb->nodesize) || sb->nodesize < sb->sectorsize ||
	    sb->nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
		printf("%s: invalid nodesize %u\n", __func__, sb->nodesize);
		ret = -1;
	}

	if (sb->nodesize != sb->__unused_leafsize) {
		printf("%s: invalid leafsize %u, should be %u\n", __func__,
		       sb->__unused_leafsize, sb->nodesize);
		ret = -1;
	}

	if (!IS_ALIGNED(sb->root, sb->sectorsize)) {
		printf("%s: tree_root block unaligned: %llu\n", __func__,
		       sb->root);
		ret = -1;
	}

	if (!IS_ALIGNED(sb->chunk_root, sb->sectorsize)) {
		printf("%s: chunk_root block unaligned: %llu\n", __func__,
		       sb->chunk_root);
		ret = -1;
	}

	if (!IS_ALIGNED(sb->log_root, sb->sectorsize)) {
		printf("%s: log_root block unaligned: %llu\n", __func__,
		       sb->log_root);
		ret = -1;
	}

	if (memcmp(sb->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
		printf("%s: dev_item UUID does not match fsid\n", __func__);
		ret = -1;
	}

	if (sb->bytes_used < 6*sb->nodesize) {
		printf("%s: bytes_used is too small %llu\n", __func__,
		       sb->bytes_used);
		ret = -1;
	}

	if (!is_power_of_2(sb->stripesize)) {
		printf("%s: invalid stripesize %u\n", __func__, sb->stripesize);
		ret = -1;
	}

	if (sb->sys_chunk_array_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		printf("%s: system chunk array too big %u > %u\n", __func__,
		       sb->sys_chunk_array_size, BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
		ret = -1;
	}

	if (sb->sys_chunk_array_size < sizeof(struct btrfs_key) +
	    sizeof(struct btrfs_chunk)) {
		printf("%s: system chunk array too small %u < %lu\n", __func__,
		       sb->sys_chunk_array_size, (u32) sizeof(struct btrfs_key)
		       + sizeof(struct btrfs_chunk));
		ret = -1;
	}

	return ret;
}
Example #21
static int seqiv_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = 8;
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;
	info = req->iv;

	if (req->src != req->dst) {
		SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);

		skcipher_request_set_tfm(nreq, ctx->sknull);
		skcipher_request_set_callback(nreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(nreq, req->src, req->dst,
					   req->assoclen + req->cryptlen,
					   NULL);

		err = crypto_skcipher_encrypt(nreq);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
		compl = seqiv_aead_encrypt_complete;
		data = req;
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

	err = crypto_aead_encrypt(subreq);
	if (unlikely(info != req->iv))
		seqiv_aead_encrypt_complete2(req, err);
	return err;
}
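
crypto_aead_alignmask() returns a mask of the form 2^k - 1, so alignmask + 1 is the byte alignment the cipher wants for its IV; the kmalloc() bounce path only runs when req->iv misses it. A small illustration with hypothetical values:

/*
 * alignmask = 7  (cipher wants 8-byte-aligned buffers)
 *   IS_ALIGNED((unsigned long)info, 7 + 1)  ->  ((unsigned long)info & 7) == 0
 *
 * info = 0x...1008 -> aligned, the IV is used in place
 * info = 0x...100a -> misaligned, a bounce IV is allocated, used for the
 *                     request and copied back in the completion handler
 */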
Example #22
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(
			&desc, req->dst, req->src,
			req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen, info);
	aead_request_set_ad(subreq, req->assoclen);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}

static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

	inst->free = aead_geniv_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}