void BVH4BuilderFast::build_sequential(size_t threadIndex, size_t threadCount)
{
    /* start measurement */
    double t0 = 0.0f;
    if (g_verbose >= 2) t0 = getSeconds();

    /* initialize node and leaf allocator */
    nodeAllocator.reset();
    primAllocator.reset();
    __aligned(64) Allocator nodeAlloc(nodeAllocator);
    __aligned(64) Allocator leafAlloc(primAllocator);

    /* create prim refs */
    global_bounds.reset();
    computePrimRefs(0,1);
    bvh->bounds = global_bounds.geometry;

    /* create initial build record */
    BuildRecord br;
    br.init(global_bounds,0,numPrimitives);
    br.depth = 1;
    br.parentNode = (size_t)&bvh->root;

    /* build BVH in single thread */
    recurseSAH(br,nodeAlloc,leafAlloc,RECURSE_SEQUENTIAL,threadIndex,threadCount);

    /* stop measurement */
    if (g_verbose >= 2) dt = getSeconds()-t0;
}
void BVH4BuilderFast::buildSubTrees(const size_t threadID, const size_t numThreads)
{
    __aligned(64) Allocator nodeAlloc(nodeAllocator);
    __aligned(64) Allocator leafAlloc(primAllocator);

    while (true)
    {
        BuildRecord br;
        if (!g_state->workStack.pop_largest(br)) // FIXME: might lose threads during build
        {
            /* global work queue empty => try to steal from neighboring queues */
            bool success = false;
            for (size_t i=0; i<numThreads; i++)
            {
                if (g_state->threadStack[(threadID+i)%numThreads].pop_smallest(br)) {
                    success = true;
                    break;
                }
            }

            /* found nothing to steal ? */
            if (!success) break;
        }

        /* process local work queue */
        recurseSAH(br,nodeAlloc,leafAlloc,RECURSE_PARALLEL,threadID,numThreads);
        while (g_state->threadStack[threadID].pop_largest(br))
            recurseSAH(br,nodeAlloc,leafAlloc,RECURSE_PARALLEL,threadID,numThreads);
    }
}
/**
 * Adds radiotap header
 *
 * Any error indicated as "Bad FCS"
 *
 * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
 *  - Rx descriptor: 32 bytes
 *  - Phy info
 */
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
                                       struct sk_buff *skb)
{
    struct wireless_dev *wdev = wil->wdev;
    struct wil6210_rtap {
        struct ieee80211_radiotap_header rthdr;
        /* fields should be in the order of bits in rthdr.it_present */
        /* flags */
        u8 flags;
        /* channel */
        __le16 chnl_freq __aligned(2);
        __le16 chnl_flags;
        /* MCS */
        u8 mcs_present;
        u8 mcs_flags;
        u8 mcs_index;
    } __packed;
    struct wil6210_rtap_vendor {
        struct wil6210_rtap rtap;
        /* vendor */
        u8 vendor_oui[3] __aligned(2);
        u8 vendor_ns;
        __le16 vendor_skip;
        u8 vendor_data[0];
    } __packed;
    struct vring_rx_desc *d = wil_skb_rxdesc(skb);
    struct wil6210_rtap_vendor *rtap_vendor;
    int rtap_len = sizeof(struct wil6210_rtap);
    int phy_length = 0; /* phy info header size, bytes */
    static char phy_data[128];
    struct ieee80211_channel *ch = wdev->preset_chandef.chan;

    if (rtap_include_phy_info) {
        rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
        /* calculate additional length */
        if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
            /**
             * PHY info starts from 8-byte boundary
             * there are 8-byte lines, last line may be partially
             * written (HW bug), thus FW configures for last line
             * to be excessive. Driver skips this last line.
             */
            int len = min_t(int, 8 + sizeof(phy_data),
                            wil_rxdesc_phy_length(d));

            if (len > 8) {
                void *p = skb_tail_pointer(skb);
                void *pa = PTR_ALIGN(p, 8);

                if (skb_tailroom(skb) >= len + (pa - p)) {
                    phy_length = len - 8;
                    memcpy(phy_data, pa, phy_length);
                }
            }
        }
        rtap_len += phy_length;
    }
/**
 * brcmf_skb_is_iapp - checks if skb is an IAPP packet
 *
 * @skb: skb to check
 */
static bool brcmf_skb_is_iapp(struct sk_buff *skb)
{
    static const u8 iapp_l2_update_packet[6] __aligned(2) = {
        0x00, 0x01, 0xaf, 0x81, 0x01, 0x00,
    };
    unsigned char *eth_data;
#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
    const u16 *a, *b;
#endif

    if (skb->len - skb->mac_len != 6 ||
        !is_multicast_ether_addr(eth_hdr(skb)->h_dest))
        return false;

    eth_data = skb_mac_header(skb) + ETH_HLEN;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
    return !(((*(const u32 *)eth_data) ^ (*(const u32 *)iapp_l2_update_packet)) |
             ((*(const u16 *)(eth_data + 4)) ^
              (*(const u16 *)(iapp_l2_update_packet + 4))));
#else
    a = (const u16 *)eth_data;
    b = (const u16 *)iapp_l2_update_packet;

    return !((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]));
#endif
}
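The fast path above relies on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS to compare the 6-byte IAPP signature with one 32-bit and one 16-bit XOR instead of a byte loop. A hedged, userspace-only sketch of the same comparison trick (using memcpy to stay portable, with memcmp as the reference result) is:

/* Hedged illustration (not driver code): compare a 6-byte header against a
 * fixed pattern by OR-ing the XOR of a 32-bit and a 16-bit chunk, mirroring
 * the fast path above. memcmp() is the portable equivalent. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool matches_pattern(const uint8_t *data, const uint8_t *pattern)
{
    uint32_t a32, b32;
    uint16_t a16, b16;

    /* memcpy avoids undefined behaviour from unaligned pointer casts */
    memcpy(&a32, data, 4);
    memcpy(&b32, pattern, 4);
    memcpy(&a16, data + 4, 2);
    memcpy(&b16, pattern + 4, 2);

    return !((a32 ^ b32) | (a16 ^ b16));
}

int main(void)
{
    const uint8_t pattern[6] = { 0x00, 0x01, 0xaf, 0x81, 0x01, 0x00 };
    const uint8_t frame[6]   = { 0x00, 0x01, 0xaf, 0x81, 0x01, 0x00 };

    printf("match: %d\n", matches_pattern(frame, pattern));     /* 1 */
    printf("memcmp: %d\n", memcmp(frame, pattern, 6) == 0);     /* 1 */
    return 0;
}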
void __vesacon_copy_to_screen(size_t dst, const uint32_t * src, size_t npixels)
{
    size_t win_pos, win_off;
    size_t win_size = wi.win_size;
    size_t omask = win_size - 1;
    char *win_base = wi.win_base;
    size_t l;
    size_t bytes = npixels * __vesacon_bytes_per_pixel;
    char rowbuf[bytes + 4] __aligned(4);
    const char *s;

    s = (const char *)__vesacon_format_pixels(rowbuf, src, npixels);

    while (bytes) {
        win_off = dst & omask;
        win_pos = dst & ~omask;

        if (__unlikely(win_pos != wi.win_pos))
            set_window_pos(win_pos);

        l = min(bytes, win_size - win_off);
        memcpy(win_base + win_off, s, l);

        bytes -= l;
        s += l;
        dst += l;
    }
}
void proc_fork_connector(struct task_struct *task)
{
    struct cn_msg *msg;
    struct proc_event *ev;
    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
    struct task_struct *parent;

    if (atomic_read(&proc_event_num_listeners) < 1)
        return;

    msg = buffer_to_cn_msg(buffer);
    ev = (struct proc_event *)msg->data;
    memset(&ev->event_data, 0, sizeof(ev->event_data));
    ev->timestamp_ns = ktime_get_ns();
    ev->what = PROC_EVENT_FORK;
    rcu_read_lock();
    parent = rcu_dereference(task->real_parent);
    ev->event_data.fork.parent_pid = parent->pid;
    ev->event_data.fork.parent_tgid = parent->tgid;
    rcu_read_unlock();
    ev->event_data.fork.child_pid = task->pid;
    ev->event_data.fork.child_tgid = task->tgid;

    memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
    msg->ack = 0; /* not used */
    msg->len = sizeof(*ev);
    msg->flags = 0; /* not used */
    send_msg(msg);
}
void fix16_vector3_str(const fix16_vector3_t *v0, char *buf, int decimals)
{
    char component_buf[13] __aligned(16);
    size_t component_buf_len;
    char *buf_ptr;

    buf_ptr = buf;

    *buf_ptr++ = '(';
    fix16_to_str(v0->x, component_buf, decimals);
    component_buf_len = strlen(component_buf);
    memcpy(buf_ptr, component_buf, component_buf_len);
    buf_ptr += component_buf_len;
    *buf_ptr++ = ',';

    fix16_to_str(v0->y, component_buf, decimals);
    component_buf_len = strlen(component_buf);
    memcpy(buf_ptr, component_buf, component_buf_len);
    buf_ptr += component_buf_len;
    *buf_ptr++ = ',';

    fix16_to_str(v0->z, component_buf, decimals);
    component_buf_len = strlen(component_buf);
    memcpy(buf_ptr, component_buf, component_buf_len);
    buf_ptr += component_buf_len;
    *buf_ptr++ = ')';

    *buf_ptr = '\0';
}
void proc_exit_connector(struct task_struct *task)
{
    struct cn_msg *msg;
    struct proc_event *ev;
    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
    struct timespec ts;

    if (atomic_read(&proc_event_num_listeners) < 1)
        return;

    msg = buffer_to_cn_msg(buffer);
    ev = (struct proc_event *)msg->data;
    memset(&ev->event_data, 0, sizeof(ev->event_data));
    get_seq(&msg->seq, &ev->cpu);
    ktime_get_ts(&ts); /* get high res monotonic timestamp */
    ev->timestamp_ns = timespec_to_ns(&ts);
    ev->what = PROC_EVENT_EXIT;
    ev->event_data.exit.process_pid = task->pid;
    ev->event_data.exit.process_tgid = task->tgid;
    ev->event_data.exit.exit_code = task->exit_code;
    ev->event_data.exit.exit_signal = task->exit_signal;

    memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
    msg->ack = 0; /* not used */
    msg->len = sizeof(*ev);
    msg->flags = 0; /* not used */
    cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
    struct cn_msg *msg;
    struct proc_event *ev;
    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
    struct timespec ts;

    if (atomic_read(&proc_event_num_listeners) < 1)
        return;

    msg = buffer_to_cn_msg(buffer);
    ev = (struct proc_event *)msg->data;
    memset(&ev->event_data, 0, sizeof(ev->event_data));
    msg->seq = rcvd_seq;
    ktime_get_ts(&ts); /* get high res monotonic timestamp */
    ev->timestamp_ns = timespec_to_ns(&ts);
    ev->cpu = -1;
    ev->what = PROC_EVENT_NONE;
    ev->event_data.ack.err = err;
    memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
    msg->ack = rcvd_ack + 1;
    msg->len = sizeof(*ev);
    msg->flags = 0; /* not used */
    cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_fork_connector(struct task_struct *task)
{
    struct cn_msg *msg;
    struct proc_event *ev;
    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
    struct timespec ts;
    struct task_struct *parent;

    if (atomic_read(&proc_event_num_listeners) < 1)
        return;

    msg = buffer_to_cn_msg(buffer);
    ev = (struct proc_event *)msg->data;
    memset(&ev->event_data, 0, sizeof(ev->event_data));
    get_seq(&msg->seq, &ev->cpu);
    ktime_get_ts(&ts); /* get high res monotonic timestamp */
    ev->timestamp_ns = timespec_to_ns(&ts);
    ev->what = PROC_EVENT_FORK;
    rcu_read_lock();
    parent = rcu_dereference(task->real_parent);
    ev->event_data.fork.parent_pid = parent->pid;
    ev->event_data.fork.parent_tgid = parent->tgid;
    rcu_read_unlock();
    ev->event_data.fork.child_pid = task->pid;
    ev->event_data.fork.child_tgid = task->tgid;

    memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
    msg->ack = 0; /* not used */
    msg->len = sizeof(*ev);
    msg->flags = 0; /* not used */
    /* If cn_netlink_send() failed, the data is not sent */
    cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
static inline offset_t __npg_bk_op(offset_t start, offset_t end, offset_t upper,
                                   uint64_t attr, npg_op_t *op, uint8_t act)
{
    offset_t addr;

    if(op->nxt) {
        offset_t start_up = __align_next(start, op->sz);
        bool_t diff_tbl = (pg_abs_idx(start, op->shf) != pg_abs_idx(end, op->shf));

        if(__aligned(start, op->sz) && diff_tbl) {
            op->fnc[act](start, attr);
            addr = start_up;
        } else
            addr = __npg_bk_op(start, end, start_up, attr, op->nxt, act);
    } else
        addr = __align(start, op->sz);

    while(addr < min(__align(end, op->sz), upper)) {
        op->fnc[act](addr, attr);
        addr += op->sz;
    }

    return addr;
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
    struct cn_msg *msg;
    struct proc_event *ev;
    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

    if (atomic_read(&proc_event_num_listeners) < 1)
        return;

    msg = buffer_to_cn_msg(buffer);
    ev = (struct proc_event *)msg->data;
    memset(&ev->event_data, 0, sizeof(ev->event_data));
    ev->timestamp_ns = ktime_get_ns();
    ev->what = PROC_EVENT_PTRACE;
    ev->event_data.ptrace.process_pid = task->pid;
    ev->event_data.ptrace.process_tgid = task->tgid;
    if (ptrace_id == PTRACE_ATTACH) {
        ev->event_data.ptrace.tracer_pid = current->pid;
        ev->event_data.ptrace.tracer_tgid = current->tgid;
    } else if (ptrace_id == PTRACE_DETACH) {
        ev->event_data.ptrace.tracer_pid = 0;
        ev->event_data.ptrace.tracer_tgid = 0;
    } else
        return;

    memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
    msg->ack = 0; /* not used */
    msg->len = sizeof(*ev);
    msg->flags = 0; /* not used */
    send_msg(msg);
}
void Slave::RenderTask::run(size_t threadIndex,
                            size_t threadCount,
                            size_t taskIndex,
                            size_t taskCount,
                            TaskScheduler::Event* event)
{
    // PING;
    const size_t tileID = taskIndex;
    if ((tileID % worker.size) != worker.rank) return;

    // PING;
    Tile __aligned(64) tile;
    const size_t tile_y = tileID / numTiles_x;
    const size_t tile_x = tileID - tile_y*numTiles_x;
    tile.region.lower.x = tile_x * TILE_SIZE;
    tile.region.lower.y = tile_y * TILE_SIZE;
    tile.region.upper.x = std::min(tile.region.lower.x+TILE_SIZE,fb->size.x);
    tile.region.upper.y = std::min(tile.region.lower.y+TILE_SIZE,fb->size.y);
    tile.fbSize = fb->size;
    tile.rcp_fbSize = rcp(vec2f(fb->size));
    renderer->renderTile(tile);

    ospray::LocalFrameBuffer *localFB = (ospray::LocalFrameBuffer *)fb.ptr;
    uint32 rgba_i8[TILE_SIZE][TILE_SIZE];
    for (int iy=tile.region.lower.y;iy<tile.region.upper.y;iy++)
        for (int ix=tile.region.lower.x;ix<tile.region.upper.x;ix++) {
            rgba_i8[iy-tile.region.lower.y][ix-tile.region.lower.x]
                = ((uint32*)localFB->colorBuffer)[ix+iy*localFB->size.x];
        }

    MPI_Send(&tile.region,4,MPI_INT,0,tileID,app.comm);
    int count = (TILE_SIZE)*(TILE_SIZE);
    MPI_Send(&rgba_i8,count,MPI_INT,0,tileID,app.comm);
}
void proc_exit_connector(struct task_struct *task)
{
    struct cn_msg *msg;
    struct proc_event *ev;
    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

    if (atomic_read(&proc_event_num_listeners) < 1)
        return;

    msg = buffer_to_cn_msg(buffer);
    ev = (struct proc_event *)msg->data;
    memset(&ev->event_data, 0, sizeof(ev->event_data));
    ev->timestamp_ns = ktime_get_ns();
    ev->what = PROC_EVENT_EXIT;
    ev->event_data.exit.process_pid = task->pid;
    ev->event_data.exit.process_tgid = task->tgid;
    ev->event_data.exit.exit_code = task->exit_code;
    ev->event_data.exit.exit_signal = task->exit_signal;

    memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
    msg->ack = 0; /* not used */
    msg->len = sizeof(*ev);
    msg->flags = 0; /* not used */
    send_msg(msg);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
    struct cn_msg *msg;
    struct proc_event *ev;
    struct timespec ts;
    __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

    if (atomic_read(&proc_event_num_listeners) < 1)
        return;

    msg = buffer_to_cn_msg(buffer);
    ev = (struct proc_event *)msg->data;
    memset(&ev->event_data, 0, sizeof(ev->event_data));
    get_seq(&msg->seq, &ev->cpu);
    ktime_get_ts(&ts); /* get high res monotonic timestamp */
    ev->timestamp_ns = timespec_to_ns(&ts);
    ev->what = PROC_EVENT_PTRACE;
    ev->event_data.ptrace.process_pid = task->pid;
    ev->event_data.ptrace.process_tgid = task->tgid;
    if (ptrace_id == PTRACE_ATTACH) {
        ev->event_data.ptrace.tracer_pid = current->pid;
        ev->event_data.ptrace.tracer_tgid = current->tgid;
    } else if (ptrace_id == PTRACE_DETACH) {
        ev->event_data.ptrace.tracer_pid = 0;
        ev->event_data.ptrace.tracer_tgid = 0;
    } else
        return;

    memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
    msg->ack = 0; /* not used */
    msg->len = sizeof(*ev);
    msg->flags = 0; /* not used */
    cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void _TEE_MathAPI_Init(void)
{
    static uint8_t data[MPI_MEMPOOL_SIZE] __aligned(MEMPOOL_ALIGN);

    mbedtls_mpi_mempool = mempool_alloc_pool(data, sizeof(data), NULL);
    if (!mbedtls_mpi_mempool)
        API_PANIC("Failed to initialize memory pool");
}
/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
    /* Use default char array length that triggers stack protection. */
    char data[8] __aligned(sizeof(void *));

    __lkdtm_CORRUPT_STACK(&data);

    pr_info("Corrupted stack containing char array ...\n");
}
static int ccm_encrypt(struct aead_request *req)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
    struct skcipher_walk walk;
    u8 __aligned(8) mac[AES_BLOCK_SIZE];
    u8 buf[AES_BLOCK_SIZE];
    u32 len = req->cryptlen;
    int err;

    err = ccm_init_mac(req, mac, len);
    if (err)
        return err;

    if (req->assoclen)
        ccm_calculate_auth_mac(req, mac);

    /* preserve the original iv for the final round */
    memcpy(buf, req->iv, AES_BLOCK_SIZE);

    err = skcipher_walk_aead_encrypt(&walk, req, false);

    if (crypto_simd_usable()) {
        while (walk.nbytes) {
            u32 tail = walk.nbytes % AES_BLOCK_SIZE;

            if (walk.nbytes == walk.total)
                tail = 0;

            kernel_neon_begin();
            ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                               walk.nbytes - tail, ctx->key_enc,
                               num_rounds(ctx), mac, walk.iv);
            kernel_neon_end();

            err = skcipher_walk_done(&walk, tail);
        }
        if (!err) {
            kernel_neon_begin();
            ce_aes_ccm_final(mac, buf, ctx->key_enc,
                             num_rounds(ctx));
            kernel_neon_end();
        }
    } else {
        err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
    }
    if (err)
        return err;

    /* copy authtag to end of dst */
    scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
                             crypto_aead_authsize(aead), 1);

    return 0;
}
void tee_otp_get_hw_unique_key(struct tee_hw_unique_key *hwkey)
{
    int ret = 0;
    uint8_t hw_unq_key[sizeof(hwkey->data)] __aligned(64);

    ret = get_hw_unique_key(OPTEE_SMC_FAST_CALL_SIP_LS_HW_UNQ_KEY,
                            virt_to_phys(hw_unq_key), sizeof(hwkey->data));
    if (ret < 0)
        EMSG("\nH/W Unique key is not fetched from the platform.");
    else
        memcpy(&hwkey->data[0], hw_unq_key, sizeof(hwkey->data));
}
size_t SubdivPatch1Base::get64BytesBlocksForGridSubTree(const GridRange& range,
                                                        const unsigned int leafBlocks)
{
    if (range.hasLeafSize()) return leafBlocks;

    __aligned(64) GridRange r[4];
    const unsigned int children = range.splitIntoSubRanges(r);

    size_t blocks = 2; /* 128 bytes bvh4 node layout */

    for (unsigned int i=0;i<children;i++)
        blocks += get64BytesBlocksForGridSubTree(r[i],leafBlocks);

    return blocks;
}
long probe_kernel_write(void *dst, const void *src, size_t size)
{
    unsigned long ldst = (unsigned long)dst;
    void __iomem *iodst = (void __iomem *)dst;
    unsigned long lsrc = (unsigned long)src;
    const u8 *psrc = (u8 *)src;
    unsigned int pte, i;
    u8 bounce[8] __aligned(8);

    if (!size)
        return 0;

    /* Use the write combine bit to decide if the destination is MMIO. */
    pte = __builtin_meta2_cacherd(dst);

    /* Check the mapping is valid and writeable. */
    if ((pte & (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
        != (MMCU_ENTRY_WR_BIT | MMCU_ENTRY_VAL_BIT))
        return -EFAULT;

    /* Fall back to generic version for cases we're not interested in. */
    if (pte & MMCU_ENTRY_WRC_BIT || /* write combined memory */
        (ldst & (size - 1)) ||      /* destination unaligned */
        size > 8 ||                 /* more than max write size */
        (size & (size - 1)))        /* non power of 2 size */
        return __probe_kernel_write(dst, src, size);

    /* If src is unaligned, copy to the aligned bounce buffer first. */
    if (lsrc & (size - 1)) {
        for (i = 0; i < size; ++i)
            bounce[i] = psrc[i];
        psrc = bounce;
    }

    switch (size) {
    case 1:
        writeb(*psrc, iodst);
        break;
    case 2:
        writew(*(const u16 *)psrc, iodst);
        break;
    case 4:
        writel(*(const u32 *)psrc, iodst);
        break;
    case 8:
        writeq(*(const u64 *)psrc, iodst);
        break;
    }

    return 0;
}
static inline void __npg_fw_op(offset_t start, offset_t end, uint64_t attr,
                               npg_op_t *op, uint8_t act)
{
    offset_t addr = start;

    while(addr < __align(end, op->sz)) {
        op->fnc[act](addr, attr);
        addr += op->sz;
    }

    if(op->nxt && !__aligned(end, op->sz))
        __npg_fw_op(addr, end, attr, op->nxt, act);
}
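Unlike the other snippets in this section, __aligned() here is a two-argument alignment predicate used alongside __align() and __align_next(), not an attribute macro. The helpers below are a minimal standalone sketch of that reading, assuming power-of-two sizes; the project's actual macro definitions may differ.

/* Hedged sketch (not the project's headers): mask-based alignment helpers
 * for power-of-two sizes, matching how the two paging functions above use
 * round-down, round-up, and an "is aligned" test. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t offset_t;

static inline offset_t align_down(offset_t x, offset_t sz) { return x & ~(sz - 1); }
static inline offset_t align_next(offset_t x, offset_t sz) { return align_down(x + sz - 1, sz); }
static inline bool     is_aligned(offset_t x, offset_t sz) { return (x & (sz - 1)) == 0; }

int main(void)
{
    offset_t sz = 0x1000; /* 4 KiB pages */

    printf("%llx\n", (unsigned long long)align_down(0x1234, sz)); /* 1000 */
    printf("%llx\n", (unsigned long long)align_next(0x1234, sz)); /* 2000 */
    printf("%d\n", is_aligned(0x2000, sz));                       /* 1 */
    return 0;
}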
static int htif_enumerate(struct htif_softc *sc)
{
    char id[HTIF_ID_LEN] __aligned(HTIF_ALIGN);
    uint64_t paddr;
    uint64_t data;
    uint64_t cmd;
    int len;
    int i;

    device_printf(sc->dev, "Enumerating devices\n");

    for (i = 0; i < HTIF_NDEV; i++) {
        paddr = pmap_kextract((vm_offset_t)&id);
        data = (paddr << IDENTIFY_PADDR_SHIFT);
        data |= IDENTIFY_IDENT;

        sc->identify_id = i;
        sc->identify_done = 0;

        cmd = i;
        cmd <<= HTIF_DEV_ID_SHIFT;
        cmd |= (HTIF_CMD_IDENTIFY << HTIF_CMD_SHIFT);
        cmd |= data;

        htif_command(cmd);

        /* Do poll as interrupts are disabled yet */
        while (sc->identify_done == 0) {
            htif_handle_entry(sc);
        }

        len = strnlen(id, sizeof(id));
        if (len <= 0)
            break;

        if (bootverbose)
            printf(" %d %s\n", i, id);

        if (strncmp(id, "disk", 4) == 0)
            htif_add_device(sc, i, id, "htif_blk");
        else if (strncmp(id, "bcd", 3) == 0)
            htif_add_device(sc, i, id, "htif_console");
        else if (strncmp(id, "syscall_proxy", 13) == 0)
            htif_add_device(sc, i, id, "htif_syscall_proxy");
    }

    return (bus_generic_attach(sc->dev));
}
void ofw_pci_dmamap_sync_stst_order_common(void)
{
    static u_char buf[VIS_BLOCKSIZE] __aligned(VIS_BLOCKSIZE);
    register_t reg, s;

    s = intr_disable();
    reg = rd(fprs);
    wr(fprs, reg | FPRS_FEF, 0);
    __asm __volatile("stda %%f0, [%0] %1"
        : : "r" (buf), "n" (ASI_BLK_COMMIT_S));
    membar(Sync);
    wr(fprs, reg, 0);
    intr_restore(s);
}
static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes)
{
    struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
    int err, first, rounds = 6 + ctx->key_length / 4;
    struct blkcipher_walk walk;
    int blocks;

    desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
    blkcipher_walk_init(&walk, dst, src, nbytes);
    err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);

    first = 1;
    kernel_neon_begin();
    while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
        aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                        (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
                        first);
        first = 0;
        nbytes -= blocks * AES_BLOCK_SIZE;
        if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
            break;
        err = blkcipher_walk_done(desc, &walk,
                                  walk.nbytes % AES_BLOCK_SIZE);
    }
    if (nbytes) {
        u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
        u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
        u8 __aligned(8) tail[AES_BLOCK_SIZE];

        /*
         * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
         * to tell aes_ctr_encrypt() to only read half a block.
         */
        blocks = (nbytes <= 8) ? -1 : 1;

        aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
                        blocks, walk.iv, first);
        memcpy(tdst, tail, nbytes);
        err = blkcipher_walk_done(desc, &walk, 0);
    }
    kernel_neon_end();

    return err;
}
static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
                           unsigned int bytes, int nrounds)
{
    /* aligned to potentially speed up crypto_xor() */
    u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));

    while (bytes >= CHACHA_BLOCK_SIZE) {
        chacha_block(state, stream, nrounds);
        crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
        bytes -= CHACHA_BLOCK_SIZE;
        dst += CHACHA_BLOCK_SIZE;
        src += CHACHA_BLOCK_SIZE;
    }
    if (bytes) {
        chacha_block(state, stream, nrounds);
        crypto_xor_cpy(dst, src, stream, bytes);
    }
}
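The loop above is the generic keystream-XOR pattern: whole blocks are XORed into the output, and a final partial block uses only the leading bytes of one more keystream block. A hedged, userspace-only sketch of that shape, with a toy generator standing in for chacha_block() and plain loops standing in for crypto_xor_cpy():

/* Hedged sketch (not kernel code, not a real cipher): only the
 * full-block / partial-tail handling mirrors chacha_docrypt() above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64

static void toy_keystream_block(uint32_t *counter, uint8_t out[BLOCK_SIZE])
{
    /* deterministic filler so the example runs; NOT cryptographic */
    for (int i = 0; i < BLOCK_SIZE; i++)
        out[i] = (uint8_t)(*counter + i);
    (*counter)++;
}

static void xor_crypt(uint32_t *counter, uint8_t *dst, const uint8_t *src,
                      size_t bytes)
{
    uint8_t stream[BLOCK_SIZE];

    while (bytes >= BLOCK_SIZE) {
        toy_keystream_block(counter, stream);
        for (int i = 0; i < BLOCK_SIZE; i++)
            dst[i] = src[i] ^ stream[i];
        bytes -= BLOCK_SIZE;
        dst += BLOCK_SIZE;
        src += BLOCK_SIZE;
    }
    if (bytes) {
        /* partial tail: generate a full keystream block, use only `bytes` of it */
        toy_keystream_block(counter, stream);
        for (size_t i = 0; i < bytes; i++)
            dst[i] = src[i] ^ stream[i];
    }
}

int main(void)
{
    uint8_t msg[100] = "keystream-xor round trip demo";
    uint8_t enc[100], dec[100];
    uint32_t c;

    c = 0; xor_crypt(&c, enc, msg, sizeof(msg));
    c = 0; xor_crypt(&c, dec, enc, sizeof(msg));
    printf("%s\n", memcmp(msg, dec, sizeof(msg)) == 0 ? "round trip ok" : "mismatch");
    return 0;
}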
void pgt_init(void)
{
    /*
     * We're putting this in .nozi.* instead of .bss because .nozi.* already
     * has a large alignment, while .bss has a small alignment. The current
     * link script is optimized for small alignment in .bss
     */
    static uint8_t pgt_tables[PGT_CACHE_SIZE][PGT_SIZE]
            __aligned(PGT_SIZE) __section(".nozi.pgt_cache");
    size_t n;

    for (n = 0; n < ARRAY_SIZE(pgt_tables); n++) {
        struct pgt *p = pgt_entries + n;

        p->tbl = pgt_tables[n];
        SLIST_INSERT_HEAD(&pgt_free_list, p, link);
    }
}
void db_put_value(db_addr_t addr, size_t size, db_expr_t value)
{
    char data[sizeof(db_expr_t)] __aligned(sizeof(db_expr_t));
    size_t i;

#if BYTE_ORDER == LITTLE_ENDIAN
    for (i = 0; i < size; i++)
#else /* BYTE_ORDER == BIG_ENDIAN */
    for (i = size; i-- > 0;)
#endif /* BYTE_ORDER */
    {
        data[i] = value & 0xFF;
        value >>= 8;
    }

    db_write_bytes(addr, size, data);
}
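The endian-dependent loop above stores the value least-significant-byte-first on little-endian targets and most-significant-byte-first otherwise. A standalone illustration using only standard C, with the target byte order chosen explicitly rather than via BYTE_ORDER:

/* Hedged illustration (not kernel code): split an integer into `size`
 * bytes in memory order, mirroring the loop in db_put_value() above. */
#include <stdint.h>
#include <stdio.h>

static void put_value(uint8_t *data, size_t size, uint64_t value, int little_endian)
{
    if (little_endian) {
        for (size_t i = 0; i < size; i++) {   /* data[0] receives the LSB */
            data[i] = value & 0xFF;
            value >>= 8;
        }
    } else {
        for (size_t i = size; i-- > 0;) {     /* data[0] ends up with the MSB */
            data[i] = value & 0xFF;
            value >>= 8;
        }
    }
}

int main(void)
{
    uint8_t buf[4];

    put_value(buf, sizeof(buf), 0x11223344, 1);
    printf("LE: %02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]); /* 44 33 22 11 */

    put_value(buf, sizeof(buf), 0x11223344, 0);
    printf("BE: %02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]); /* 11 22 33 44 */
    return 0;
}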
static int env_fat_save(void)
{
    env_t __aligned(ARCH_DMA_MINALIGN) env_new;
    struct blk_desc *dev_desc = NULL;
    disk_partition_t info;
    int dev, part;
    int err;
    loff_t size;

    err = env_export(&env_new);
    if (err)
        return err;

    part = blk_get_device_part_str(CONFIG_ENV_FAT_INTERFACE,
                                   CONFIG_ENV_FAT_DEVICE_AND_PART,
                                   &dev_desc, &info, 1);
    if (part < 0)
        return 1;

    dev = dev_desc->devnum;
    if (fat_set_blk_dev(dev_desc, &info) != 0) {
        /*
         * This printf is embedded in the messages from env_save that
         * calls it. The missing \n is intentional.
         */
        printf("Unable to use %s %d:%d... ",
               CONFIG_ENV_FAT_INTERFACE, dev, part);
        return 1;
    }

    err = file_fat_write(CONFIG_ENV_FAT_FILE, (void *)&env_new, 0,
                         sizeof(env_t), &size);
    if (err == -1) {
        /*
         * This printf is embedded in the messages from env_save that
         * calls it. The missing \n is intentional.
         */
        printf("Unable to write \"%s\" from %s%d:%d... ",
               CONFIG_ENV_FAT_FILE, CONFIG_ENV_FAT_INTERFACE, dev, part);
        return 1;
    }

    return 0;
}
TEE_Result tee_otp_get_hw_unique_key(struct tee_hw_unique_key *hwkey)
{
    TEE_Result res;
    int ret = 0;
    uint8_t hw_unq_key[sizeof(hwkey->data)] __aligned(64);

    ret = get_hw_unique_key(OPTEE_SMC_FAST_CALL_SIP_LS_HW_UNQ_KEY,
                            virt_to_phys(hw_unq_key), sizeof(hwkey->data));
    if (ret < 0) {
        EMSG("\nH/W Unique key is not fetched from the platform.");
        res = TEE_ERROR_SECURITY;
    } else {
        memcpy(&hwkey->data[0], hw_unq_key, sizeof(hwkey->data));
        res = TEE_SUCCESS;
    }

    return res;
}