static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	ASF_FP_LINUX_CRYPTO_FENTRY;
	ASF_FP_LINUX_CRYPTO_FEXIT;
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) :
	       tmp + seqhilen;
}
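/*
 * For reference: the alignment macros these snippets rely on, essentially
 * as defined in the kernel's <linux/kernel.h> (paraphrased). ALIGN rounds
 * an integer up to the next multiple of a power-of-two alignment, and
 * PTR_ALIGN does the same for a pointer while preserving its type.
 */
#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a) - 1)
#define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))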
static int via_rng_data_present(struct hwrng *rng, int wait)
{
	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	u32 bytes_out;
	int i;

	/*
	 * We choose the recommended 1-byte-per-instruction RNG rate,
	 * for greater randomness at the expense of speed.  Larger
	 * values 2, 4, or 8 bytes-per-instruction yield greater
	 * speed at lesser randomness.
	 *
	 * If you change this to another VIA_CHUNK_n, you must also
	 * change the ->n_bytes values in rng_vendor_ops[] tables.
	 * VIA_CHUNK_8 requires further code changes.
	 *
	 * A copy of MSR_VIA_RNG is placed in eax_out when xstore
	 * completes.
	 */
	for (i = 0; i < 20; i++) {
		*via_rng_datum = 0; /* paranoia, not really necessary */
		bytes_out = xstore(via_rng_datum, VIA_RNG_CHUNK_1);
		bytes_out &= VIA_XSTORE_CNT_MASK;
		if (bytes_out || !wait)
			break;
		udelay(10);
	}

	rng->priv = *via_rng_datum;
	return bytes_out ? 1 : 0;
}
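/*
 * The xstore() wrapper used above is, in the kernel's via-rng driver,
 * roughly the following inline-asm stub (quoted from memory, so treat it
 * as a sketch; the opcode bytes encode VIA's XSTORE instruction, and the
 * returned EAX holds the count/status bits masked with
 * VIA_XSTORE_CNT_MASK above):
 */
static inline u32 xstore(u32 *addr, u32 edx_in)
{
	u32 eax_out;

	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
		: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));

	return eax_out;
}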
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
	struct aead_request *req)
{
	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));

	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
static int crypto_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = ctx->child;
	unsigned long align = crypto_ablkcipher_alignmask(tfm);
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)ablkcipher_request_ctx(req), align + 1);
	struct ablkcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	ablkcipher_request_set_tfm(subreq, child);
	ablkcipher_request_set_callback(subreq, req->base.flags,
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->nbytes,
				     iv);

	return crypto_ablkcipher_encrypt(subreq);
}
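/*
 * The counter block assembled above is laid out per RFC 3686 as
 * | nonce (4) | per-packet IV (8) | block counter (4, big endian, = 1) |,
 * 16 bytes in total. A compile-time sanity check one could add (a sketch,
 * not in the source; the CTR_RFC3686_* constants come from crypto/ctr.h):
 */
static inline void rfc3686_block_layout_check(void)
{
	BUILD_BUG_ON(CTR_RFC3686_NONCE_SIZE != 4);
	BUILD_BUG_ON(CTR_RFC3686_IV_SIZE != 8);
	BUILD_BUG_ON(CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE +
		     sizeof(__be32) != CTR_RFC3686_BLOCK_SIZE);
}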
static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 tmp[bsize + alignmask];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
		crypto_xor(src, keystream, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
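/*
 * For reference, simplified byte-at-a-time equivalents of the two crypto
 * API helpers used above (the kernel versions are word-optimized; these
 * sketches only show the semantics): crypto_xor() XORs src into dst, and
 * crypto_inc() increments a big-endian counter with carry.
 */
static void crypto_xor_sketch(u8 *dst, const u8 *src, unsigned int size)
{
	while (size--)
		*dst++ ^= *src++;
}

static void crypto_inc_sketch(u8 *a, unsigned int size)
{
	for (; size; size--)
		if (++a[size - 1])
			break;
}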
static void *real_aligned_malloc(size_t size, size_t align)
{
	void *p0, *p;

	if (NOT_POWER_OF_TWO(align)) {
		errno = EINVAL;
		return NULL;
	}

	if (align < sizeof(void *))
		align = sizeof(void *);

	/*
	 * Including the extra sizeof(void *) is overkill on a 32-bit
	 * machine, since malloc is already 8-byte aligned, as long as we
	 * enforce alignment >= 8 ...but oh well.
	 */
	p0 = malloc(size + align + sizeof(void *));
	if (!p0)
		return NULL;

	p = PTR_ALIGN(p0, align);
	ORIG_PTR(p) = p0;
	return p;
}
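/*
 * A plausible matching free routine (not shown in the source). It assumes
 * ORIG_PTR(p) names the void * slot stashed just below the aligned block;
 * the extra sizeof(void *) in the malloc above reserves room for it.
 */
static void real_aligned_free(void *p)
{
	if (p)
		free(ORIG_PTR(p));
}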
static int crypto_rfc3686_crypt(struct blkcipher_desc *desc,
				struct scatterlist *dst,
				struct scatterlist *src, unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_rfc3686_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_blkcipher *child = ctx->child;
	unsigned long alignmask = crypto_blkcipher_alignmask(tfm);
	u8 ivblk[CTR_RFC3686_BLOCK_SIZE + alignmask];
	u8 *iv = PTR_ALIGN(ivblk + 0, alignmask + 1);
	u8 *info = desc->info;
	int err;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	desc->tfm = child;
	desc->info = iv;
	err = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
	desc->tfm = tfm;
	desc->info = info;

	return err;
}
static int alloc_align_buffer(struct urb *urb, gfp_t mem_flags)
{
	struct dma_align_buffer *temp, *kmalloc_ptr;
	size_t kmalloc_size;

	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1)))
		return 0;

	/* Allocate a buffer with enough padding for alignment */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct dma_align_buffer) + TEGRA_USB_DMA_ALIGN - 1;
	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* Position our struct dma_align_buffer such that data is aligned */
	temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1;
	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;

	/* OUT transaction, DMA to Device */
	if (!usb_urb_dir_in(urb))
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);

	urb->transfer_buffer = temp->data;
	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}
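/*
 * The matching teardown looks roughly like this (reconstructed from the
 * allocation above, so treat it as a sketch): for IN transfers the DMA'd
 * data must be copied back into the caller's buffer before the bounce
 * buffer is freed.
 */
static void free_align_buffer(struct urb *urb)
{
	struct dma_align_buffer *temp;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	temp = container_of(urb->transfer_buffer,
			    struct dma_align_buffer, data);

	/* IN transaction, DMA from Device */
	if (usb_urb_dir_in(urb))
		memcpy(temp->old_xfer_buffer, temp->data,
		       urb->transfer_buffer_length);

	urb->transfer_buffer = temp->old_xfer_buffer;
	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
	kfree(temp->kmalloc_ptr);
}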
static void *kzalloc_aligned(size_t size, gfp_t flags, size_t align)
{
	void *ptr, *aligned_ptr;
	size_t aligned_size;

	/* not a power of two */
	if (align & (align - 1))
		return NULL;

	/* worst case allocation size */
	aligned_size = size + align - 1;

	/* add extra space to store allocation delta */
	aligned_size += sizeof(size_t);

	/* allocate all space */
	ptr = kzalloc(aligned_size, flags);
	if (!ptr)
		return NULL;

	/* calculate the aligned address, making room for the delta value */
	aligned_ptr = PTR_ALIGN(ptr + sizeof(size_t), align);

	/* save the delta before the address returned to caller */
	*((size_t *)aligned_ptr - 1) = aligned_ptr - ptr;

	return aligned_ptr;
}
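/*
 * Hypothetical counterpart (not in the source): recover the original
 * kzalloc() pointer from the delta stored just below the aligned address,
 * then free it.
 */
static void kfree_aligned(void *aligned_ptr)
{
	if (aligned_ptr)
		kfree((char *)aligned_ptr - *((size_t *)aligned_ptr - 1));
}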
/*
 * es_notify_vacuum_for_delete () - An external storage file cannot be
 *				    deleted when the transaction ends and
 *				    MVCC is used. Vacuum must be notified
 *				    instead, and the file is deleted once it
 *				    is no longer visible.
 *
 * return	 : Void.
 * thread_p (in) : Thread entry.
 * uri (in)	 : File location URI.
 */
void
es_notify_vacuum_for_delete (THREAD_ENTRY * thread_p, const char *uri)
{
#define ES_NOTIFY_VACUUM_FOR_DELETE_BUFFER_SIZE \
  (INT_ALIGNMENT +	/* Aligning buffer start */ \
   OR_INT_SIZE +	/* String length */ \
   ES_MAX_URI_LEN +	/* URI string */ \
   INT_ALIGNMENT)	/* Alignment of packed string */

  LOG_DATA_ADDR addr;
  int length;
  char data_buf[ES_NOTIFY_VACUUM_FOR_DELETE_BUFFER_SIZE];
  char *data = NULL;

  addr.offset = -1;
  addr.pgptr = NULL;
  addr.vfid = NULL;

  /* Compute the total length required to pack the string */
  length = or_packed_string_length (uri, NULL);
  /* Check there is enough space in the data buffer to pack the string */
  assert (length <= ES_NOTIFY_VACUUM_FOR_DELETE_BUFFER_SIZE - INT_ALIGNMENT);

  /* Align buffer to prepare for packing the string */
  data = PTR_ALIGN (data_buf, INT_ALIGNMENT);

  /* Pack the string */
  (void) or_pack_string (data, uri);

  /*
   * This is not actually ever undone, but vacuum will process the undo
   * data of the log entry.
   */
  log_append_undo_data (thread_p, RVES_NOTIFY_VACUUM, &addr, length, data);
}
static inline struct aesni_rfc4106_gcm_ctx *
aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	return (struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(crypto_aead_tfm(tfm)),
			  AESNI_ALIGN);
}
static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "failed to allocate address\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	get_random_bytes(addr, size);

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM, crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	/*
	 * The host-side "write" test asks the endpoint to *read* the host
	 * buffer, hence COMMAND_READ and STATUS_READ_SUCCESS below.
	 */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);

err:
	return ret;
}
static dma_addr_t xway_gphy_load(struct platform_device *pdev)
{
	const struct firmware *fw;
	dma_addr_t dev_addr = 0;
	const char *fw_name;
	void *fw_addr;
	size_t size;

	if (of_get_property(pdev->dev.of_node, "firmware1", NULL) ||
	    of_get_property(pdev->dev.of_node, "firmware2", NULL)) {
		switch (ltq_soc_type()) {
		case SOC_TYPE_VR9:
			if (of_property_read_string(pdev->dev.of_node,
						    "firmware1", &fw_name)) {
				dev_err(&pdev->dev,
					"failed to load firmware filename\n");
				return 0;
			}
			break;
		case SOC_TYPE_VR9_2:
			if (of_property_read_string(pdev->dev.of_node,
						    "firmware2", &fw_name)) {
				dev_err(&pdev->dev,
					"failed to load firmware filename\n");
				return 0;
			}
			break;
		}
	} else if (of_property_read_string(pdev->dev.of_node, "firmware",
					   &fw_name)) {
		dev_err(&pdev->dev, "failed to load firmware filename\n");
		return 0;
	}

	dev_info(&pdev->dev, "requesting %s\n", fw_name);
	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		dev_err(&pdev->dev, "failed to load firmware: %s\n", fw_name);
		return 0;
	}

	/*
	 * GPHY cores need the firmware code in a persistent and contiguous
	 * memory area with a 16 kB boundary aligned start address.
	 */
	size = fw->size + XRX200_GPHY_FW_ALIGN;
	fw_addr = dma_alloc_coherent(&pdev->dev, size, &dev_addr, GFP_KERNEL);
	if (fw_addr) {
		fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
		dev_addr = ALIGN(dev_addr, XRX200_GPHY_FW_ALIGN);
		memcpy(fw_addr, fw->data, fw->size);
	} else {
		dev_err(&pdev->dev, "failed to alloc firmware memory\n");
	}

	release_firmware(fw);

	return dev_addr;
}
static inline struct generic_gcmaes_ctx *
generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
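/*
 * Why the "align = 1" fallback is safe: crypto_tfm_ctx_alignment()
 * reports the alignment the crypto API already guarantees for tfm
 * contexts, so realignment is only needed when that guarantee is weaker
 * than AESNI_ALIGN. For reference, the helper is essentially this in
 * <linux/crypto.h>:
 */
static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;

	return __alignof__(tfm->__crt_ctx);
}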
static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
			   unsigned int key_len)
{
	int ret = 0;
	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
	struct aesni_rfc4106_gcm_ctx *child_ctx =
		aesni_rfc4106_gcm_ctx_get(cryptd_child);
	u8 *new_key_align, *new_key_mem = NULL;

	if (key_len < 4) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* Account for the 4 byte nonce at the end. */
	key_len -= 4;
	if (key_len != AES_KEYSIZE_128) {
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
		return -EINVAL;

	if ((unsigned long)key % AESNI_ALIGN) {
		/*
		 * The key is not aligned: use an auxiliary aligned pointer.
		 * Keep the original pointer in new_key_mem so the kfree()
		 * below frees the actual allocation, not the aligned copy.
		 */
		new_key_mem = kmalloc(key_len + AESNI_ALIGN, GFP_KERNEL);
		if (!new_key_mem)
			return -ENOMEM;

		new_key_align = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
		memcpy(new_key_align, key, key_len);
		key = new_key_align;
	}

	if (!irq_fpu_usable())
		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
					    key, key_len);
	else {
		kernel_fpu_begin();
		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
		kernel_fpu_end();
	}

	/* This must be on a 16 byte boundary! */
	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
		ret = -EINVAL;
		goto exit;
	}

	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
	memcpy(child_ctx, ctx, sizeof(*ctx));

exit:
	kfree(new_key_mem);
	return ret;
}
static void rfc4106_exit(struct crypto_tfm *tfm)
{
	struct aesni_rfc4106_gcm_ctx *ctx =
		(struct aesni_rfc4106_gcm_ctx *)
		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);

	if (!IS_ERR(ctx->cryptd_tfm))
		cryptd_free_aead(ctx->cryptd_tfm);
}
/**
 * Adds radiotap header
 *
 * Any error is indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct wireless_dev *wdev = wil->wdev;
	struct wil6210_rtap {
		struct ieee80211_radiotap_header rthdr;
		/* fields should be in the order of bits in rthdr.it_present */
		/* flags */
		u8 flags;
		/* channel */
		__le16 chnl_freq __aligned(2);
		__le16 chnl_flags;
		/* MCS */
		u8 mcs_present;
		u8 mcs_flags;
		u8 mcs_index;
	} __packed;
	struct wil6210_rtap_vendor {
		struct wil6210_rtap rtap;
		/* vendor */
		u8 vendor_oui[3] __aligned(2);
		u8 vendor_ns;
		__le16 vendor_skip;
		u8 vendor_data[0];
	} __packed;
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	struct wil6210_rtap_vendor *rtap_vendor;
	int rtap_len = sizeof(struct wil6210_rtap);
	int phy_length = 0; /* phy info header size, bytes */
	static char phy_data[128];
	struct ieee80211_channel *ch = wdev->preset_chandef.chan;

	if (rtap_include_phy_info) {
		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
		/* calculate additional length */
		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
			/**
			 * PHY info starts from an 8-byte boundary.
			 * It comes in 8-byte lines; the last line may be
			 * partially written (HW bug), so FW configures the
			 * last line to be excessive and the driver skips it.
			 */
			int len = min_t(int, 8 + sizeof(phy_data),
					wil_rxdesc_phy_length(d));

			if (len > 8) {
				void *p = skb_tail_pointer(skb);
				void *pa = PTR_ALIGN(p, 8);

				if (skb_tailroom(skb) >= len + (pa - p)) {
					phy_length = len - 8;
					memcpy(phy_data, pa, phy_length);
				}
			}
		}
		rtap_len += phy_length;
	}
static void eseqiv_complete2(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);

	memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail,
	       crypto_ablkcipher_alignmask(geniv) + 1),
	       crypto_ablkcipher_ivsize(geniv));
}
static inline struct aesni_rfc4106_gcm_ctx *
aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
int crypto_stream_xor(unsigned char *out, const unsigned char *in,
		      unsigned long long inlen, const unsigned char *n,
		      const unsigned char *k)
{
#define CTX_TYPE struct aes_ctx_bitslice
/*
 * Note: unlike the kernel macro of the same name, this local PTR_ALIGN
 * takes a mask (alignment - 1), not an alignment, hence the "align - 1"
 * at the call site below.
 */
#define PTR_ALIGN(ptr, mask) \
	((void *)((((long)(ptr)) + (mask)) & ~((long)(mask))))

	const unsigned long align = 16;
	char ctxbuf[sizeof(CTX_TYPE) + align];
	CTX_TYPE *ctx = PTR_ALIGN(ctxbuf, align - 1);
	uint128_t iv;

	aes_init_bitslice(ctx, k, CRYPTO_KEYBYTES);
	bswap128(&iv, (const uint128_t *)n); /* be => le */

	if (likely(inlen >= PARALLEL_BLOCKS * BLOCKSIZE)) {
		unsigned long chunks = inlen / (PARALLEL_BLOCKS * BLOCKSIZE);

		aes_ctr_8way(ctx, out, in, &iv, chunks);

		inlen -= chunks * PARALLEL_BLOCKS * BLOCKSIZE;
		out += chunks * PARALLEL_BLOCKS * BLOCKSIZE;
		in += unlikely(in) ? chunks * PARALLEL_BLOCKS * BLOCKSIZE : 0;
	}

	if (unlikely(inlen > 0)) {
		uint128_t buf[PARALLEL_BLOCKS];
		unsigned int i, j;

		aes_ctr_8way(ctx, buf, NULL, &iv, 1);

		if (in) {
			for (i = 0; inlen >= BLOCKSIZE; i++) {
				xor128((uint128_t *)out, (uint128_t *)in,
				       &buf[i]);
				inlen -= BLOCKSIZE;
				in += BLOCKSIZE;
				out += BLOCKSIZE;
			}

			for (j = 0; j < inlen; j++)
				out[j] = in[j] ^ ((uint8_t *)&buf[i])[j];
		} else {
			for (i = 0; inlen >= BLOCKSIZE; i++) {
				mov128((uint128_t *)out, &buf[i]);
				inlen -= BLOCKSIZE;
				out += BLOCKSIZE;
			}

			for (j = 0; j < inlen; j++)
				out[j] = ((uint8_t *)&buf[i])[j];
		}
	}

	return 0;
}
static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) +
	       PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}
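/*
 * How the copy helper above gets used: the fast path feeds the user
 * buffer straight to the xcrypt instruction, and only buffers whose tail
 * would be prefetched past the end of the page fall back to the aligned
 * bounce copy. A sketch modeled on padlock-aes.c (ecb_fetch_bytes is the
 * per-operation prefetch window):
 */
static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/*
	 * Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}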
static void *unflatten_dt_alloc(void **mem, unsigned long size,
				unsigned long align)
{
	void *res;

	*mem = PTR_ALIGN(*mem, align);
	res = *mem;
	*mem += size;

	return res;
}
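/*
 * Usage sketch (hypothetical, not from the source): carve two differently
 * aligned objects out of one bump-allocated block; each returned pointer
 * is naturally aligned for its type because the cursor is realigned
 * before every allocation.
 */
static void unflatten_dt_alloc_demo(void *block)
{
	void *mem = block;
	u32 *word = unflatten_dt_alloc(&mem, sizeof(u32), __alignof__(u32));
	u64 *dword = unflatten_dt_alloc(&mem, sizeof(u64), __alignof__(u64));

	*word = 0;
	*dword = 0;
}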
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
						 int enc)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct scatterlist *dst = req->dst;
	struct scatterlist *cipher = rctx->cipher;
	struct scatterlist *payload = rctx->payload;
	struct scatterlist *assoc = rctx->assoc;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int assoclen = req->assoclen;
	struct page *dstp;
	u8 *vdst;
	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
			   crypto_aead_alignmask(ctx->child) + 1);

	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	/* construct cipher/plaintext */
	if (enc)
		memset(rctx->auth_tag, 0, authsize);
	else
		scatterwalk_map_and_copy(rctx->auth_tag, dst,
					 req->cryptlen - authsize,
					 authsize, 0);

	sg_init_one(cipher, rctx->auth_tag, authsize);

	/* construct the aad */
	dstp = sg_page(dst);
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;

	sg_init_table(payload, 2);
	sg_set_buf(payload, req->iv, 8);
	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

	sg_init_table(assoc, 2);
	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
		    req->assoc->offset);
	scatterwalk_crypto_chain(assoc, payload, 0, 2);

	aead_request_set_tfm(subreq, ctx->child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
	aead_request_set_assoc(subreq, assoc, assoclen);

	return subreq;
}
static int pxafb_probe(struct device_d *dev)
{
	struct pxafb_platform_data *pdata = dev->platform_data;
	struct pxafb_info *fbi;
	struct fb_info *info;
	int ret;

	if (!pdata)
		return -ENODEV;

	fbi = xzalloc(sizeof(*fbi));
	info = &fbi->info;

	fbi->mode = pdata->mode;
	fbi->regs = dev_request_mem_region(dev, 0);
	if (IS_ERR(fbi->regs))
		return PTR_ERR(fbi->regs);

	fbi->dev = dev;
	fbi->lcd_power = pdata->lcd_power;
	fbi->backlight_power = pdata->backlight_power;
	info->mode = &pdata->mode->mode;
	info->fbops = &pxafb_ops;

	info->xres = pdata->mode->mode.xres;
	info->yres = pdata->mode->mode.yres;
	info->bits_per_pixel = pdata->mode->bpp;

	pxafb_decode_mach_info(fbi, pdata);

	dev_info(dev, "PXA Framebuffer driver\n");

	if (pdata->framebuffer)
		fbi->info.screen_base = pdata->framebuffer;
	else
		fbi->info.screen_base =
			PTR_ALIGN(dma_alloc_coherent(info->xres * info->yres *
						     (info->bits_per_pixel >> 3) +
						     PAGE_SIZE),
				  PAGE_SIZE);

	fbi->dma_buff = PTR_ALIGN(dma_alloc_coherent(sizeof(struct pxafb_dma_buff) + 16),
				  16);

	pxafb_activate_var(fbi);

	ret = register_framebuffer(&fbi->info);
	if (ret < 0) {
		dev_err(dev, "failed to register framebuffer\n");
		return ret;
	}

	return 0;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	ASF_FP_LINUX_CRYPTO_FENTRY;
	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	ASF_FP_LINUX_CRYPTO_FEXIT;
	return req;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	/*
	 * Correctly initialize memory to 0 to prevent garbage
	 * data crashing system when download firmware
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space =
		PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
			  CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space =
		ALIGN(dest_ring->base_addr_ce_space_unaligned,
		      CE_DESC_RING_ALIGN);

	return dest_ring;
}
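/*
 * Note the pairing above: the CPU pointer is advanced with PTR_ALIGN()
 * and the bus address with ALIGN(), by the same amount, so both views
 * still refer to the same descriptor memory. A generic helper capturing
 * the idea (hypothetical, not in ath10k):
 */
static void align_coherent_pair(void **cpu_addr, dma_addr_t *bus_addr,
				unsigned long align)
{
	unsigned long pad = ALIGN(*bus_addr, align) - *bus_addr;

	/* apply identical padding to both the CPU and device views */
	*cpu_addr += pad;
	*bus_addr += pad;
}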
static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input
	 * buffers. Assume there are at least 16 bytes of stack already in
	 * use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) +
	       PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	void *base_struct_unaligned;
	struct sis_base_struct *base_struct;
	struct sis_sync_cmd_params params;
	unsigned long error_buffer_paddr;
	dma_addr_t bus_address;

	base_struct_unaligned = kzalloc(sizeof(*base_struct)
					+ SIS_BASE_STRUCT_ALIGNMENT - 1,
					GFP_KERNEL);
	if (!base_struct_unaligned)
		return -ENOMEM;

	base_struct = PTR_ALIGN(base_struct_unaligned,
				SIS_BASE_STRUCT_ALIGNMENT);
	error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;

	put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
	put_unaligned_le32(lower_32_bits(error_buffer_paddr),
			   &base_struct->error_buffer_paddr_low);
	put_unaligned_le32(upper_32_bits(error_buffer_paddr),
			   &base_struct->error_buffer_paddr_high);
	put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
			   &base_struct->error_buffer_element_length);
	put_unaligned_le32(ctrl_info->max_io_slots,
			   &base_struct->error_buffer_num_elements);

	bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
				     sizeof(*base_struct), PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
		rc = -ENOMEM;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.mailbox[1] = lower_32_bits((u64)bus_address);
	params.mailbox[2] = upper_32_bits((u64)bus_address);
	params.mailbox[3] = sizeof(*base_struct);

	rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
			       &params);

	pci_unmap_single(ctrl_info->pci_dev, bus_address,
			 sizeof(*base_struct), PCI_DMA_TODEVICE);

out:
	kfree(base_struct_unaligned);
	return rc;
}
static struct gelic_card * __devinit
gelic_alloc_card_net(struct net_device **netdev)
{
	struct gelic_card *card;
	struct gelic_port *port;
	void *p;
	size_t alloc_size;

	/*
	 * gelic requires dma descriptor is 32 bytes aligned and
	 * the hypervisor requires irq_status is 8 bytes aligned.
	 */
	BUILD_BUG_ON(offsetof(struct gelic_card, irq_status) % 8);
	BUILD_BUG_ON(offsetof(struct gelic_card, descr) % 32);
	alloc_size = sizeof(struct gelic_card) +
		sizeof(struct gelic_descr) * GELIC_NET_RX_DESCRIPTORS +
		sizeof(struct gelic_descr) * GELIC_NET_TX_DESCRIPTORS +
		GELIC_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p)
		return NULL;
	card = PTR_ALIGN(p, GELIC_ALIGN);
	card->unalign = p;

	/*
	 * alloc netdev
	 */
	*netdev = alloc_etherdev(sizeof(struct gelic_port));
	if (!*netdev) {
		kfree(card->unalign);
		return NULL;
	}
	port = netdev_priv(*netdev);

	/* gelic_port */
	port->netdev = *netdev;
	port->card = card;
	port->type = GELIC_PORT_ETHERNET;

	/* gelic_card */
	card->netdev[GELIC_PORT_ETHERNET] = *netdev;

	INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task);
	init_waitqueue_head(&card->waitq);
	atomic_set(&card->tx_timeout_task_counter, 0);
	mutex_init(&card->updown_lock);
	atomic_set(&card->users, 0);

	return card;
}