/* Called from syscall */ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr) { printk("* In function %s *\n", __FUNCTION__); return ERR_PTR(-ENOMEM); #if 0 struct bpf_bloom *bloom; int err, i; bloom = kzalloc(sizeof(*bloom), GFP_USER); if (!bloom) return ERR_PTR(-ENOMEM); /* mandatory map attributes */ bloom->map.key_size = attr->key_size; bloom->map.value_size = attr->value_size; bloom->map.max_entries = attr->max_entries; /* check sanity of attributes. * value_size == 0 may be allowed in the future to use map as a set */ err = -EINVAL; if (bloom->map.max_entries == 0 || bloom->map.key_size == 0 || bloom->map.value_size == 0) goto free_bloom; /* hash table size must be power of 2 */ bloom->n_buckets = roundup_pow_of_two(bloom->map.max_entries); err = -E2BIG; if (bloom->map.key_size > MAX_BPF_STACK) /* eBPF programs initialize keys on stack, so they cannot be * larger than max stack size */ goto free_bloom; err = -ENOMEM; /* prevent zero size kmalloc and check for u32 overflow */ if (bloom->n_buckets == 0 || bloom->n_buckets > U32_MAX / sizeof(struct hlist_head)) goto free_bloom; bloom->buckets = kmalloc_array(bloom->n_buckets, sizeof(struct hlist_head), GFP_USER | __GFP_NOWARN); if (!bloom->buckets) { bloom->buckets = vmalloc(bloom->n_buckets * sizeof(struct hlist_head)); if (!bloom->buckets) goto free_bloom; } for (i = 0; i < bloom->n_buckets; i++) INIT_HLIST_HEAD(&bloom->buckets[i]); spin_lock_init(&bloom->lock); bloom->count = 0; bloom->elem_size = sizeof(struct bloom_elem) + round_up(bloom->map.key_size, 8) + bloom->map.value_size; return &bloom->map; free_bloom: kfree(bloom); return ERR_PTR(err); #endif }
/*
 * Write @count bytes from @data to the EMAC data register as a stream of
 * 32-bit stores; a partial trailing word is rounded up to a full word.
 */
static void emac_outblk_32bit(void __iomem *reg, void *data, int count)
{
	/* round_up(count, 4) / 4 == number of whole 32-bit words to push */
	int word_cnt = round_up(count, 4) / 4;

	writesl(reg, data, word_cnt);
}
{ struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long size, flags; int huge = is_vm_hugetlb_page(vma); ENTER_CRITICAL(flags); if (huge) { start = round_down(start, HPAGE_SIZE); end = round_up(end, HPAGE_SIZE); size = (end - start) >> HPAGE_SHIFT; } else { start = round_down(start, PAGE_SIZE << 1); end = round_up(end, PAGE_SIZE << 1); size = (end - start) >> (PAGE_SHIFT + 1); } if (size <= current_cpu_data.tlbsize/2) { int oldpid = read_c0_entryhi(); int newpid = cpu_asid(cpu, mm); while (start < end) { int idx; write_c0_entryhi(start | newpid); if (huge) start += HPAGE_SIZE; else start += (PAGE_SIZE << 1); mtc0_tlbw_hazard();
/**
 * fname_encrypt() - encrypt a filename
 *
 * Encrypts @iname in-place inside @oname->name using the inode's filename
 * encryption transform, NUL-padding the name up to the policy's padding
 * granularity first.  The caller must have allocated sufficient memory for
 * the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
static int fname_encrypt(struct inode *inode,
			 const struct qstr *iname, struct fscrypt_str *oname)
{
	struct ablkcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[FS_CRYPTO_BLOCK_SIZE];
	struct scatterlist sg;
	/* policy pad flag selects padding of 4, 8, 16 or 32 bytes */
	int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
	unsigned int lim;
	unsigned int cryptlen;

	/* filesystem-imposed maximum name length for this inode */
	lim = inode->i_sb->s_cop->max_namelen(inode);
	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	/*
	 * Copy the filename to the output buffer for encrypting in-place and
	 * pad it with the needed number of NUL bytes.
	 */
	cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
	cryptlen = round_up(cryptlen, padding);
	cryptlen = min(cryptlen, lim);
	memcpy(oname->name, iname->name, iname->len);
	memset(oname->name + iname->len, 0, cryptlen - iname->len);

	/* Initialize the IV (all-zero IV; each name is an independent unit) */
	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);

	/* Set up the encryption request */
	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
			"%s: ablkcipher_request_alloc() failed\n", __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			fname_crypt_complete, &ecr);
	/* single scatterlist entry: source and destination are the same */
	sg_init_one(&sg, oname->name, cryptlen);
	ablkcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);

	/* Do the encryption */
	res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		/* Request is being completed asynchronously; wait for it */
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(KERN_ERR
				"%s: Error (error code %d)\n", __func__, res);
		return res;
	}

	oname->len = cryptlen;
	return 0;
}
/*
 * Process a shared object's program header. This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker. It creates and
 * returns an Obj_Entry structure.
 */
Obj_Entry *
_rtld_digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry)
{
	Obj_Entry *obj;
	const Elf_Phdr *phlimit = phdr + phnum;
	const Elf_Phdr *ph;
	int nsegs = 0;
	Elf_Addr vaddr;

	obj = _rtld_obj_new();

	/*
	 * First pass: find PT_PHDR so the relocation base can be derived
	 * from where the kernel actually mapped the program headers.
	 */
	for (ph = phdr; ph < phlimit; ++ph) {
		if (ph->p_type != PT_PHDR)
			continue;

		obj->phdr = (void *)(uintptr_t)ph->p_vaddr;
		obj->phsize = ph->p_memsz;
		/* relocbase = actual load address - link-time vaddr */
		obj->relocbase = (caddr_t)((uintptr_t)phdr - (uintptr_t)ph->p_vaddr);
		dbg(("headers: phdr %p (%p) phsize %zu relocbase %p",
		    obj->phdr, phdr, obj->phsize, obj->relocbase));
		break;
	}

	/* Second pass: digest the remaining segment types. */
	for (ph = phdr; ph < phlimit; ++ph) {
		vaddr = (Elf_Addr)(uintptr_t)(obj->relocbase + ph->p_vaddr);
		switch (ph->p_type) {
		case PT_INTERP:
			obj->interp = (const char *)(uintptr_t)vaddr;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_INTERP", (void *)(uintptr_t)vaddr, ph->p_memsz));
			break;

		case PT_LOAD:
			/* exactly two load segments expected: text, then data */
			assert(nsegs < 2);
			if (nsegs == 0) {	/* First load segment */
				obj->vaddrbase = round_down(vaddr);
				obj->mapbase = (caddr_t)(uintptr_t)obj->vaddrbase;
				obj->textsize = round_up(vaddr + ph->p_memsz) -
				    obj->vaddrbase;
			} else {		/* Last load segment */
				obj->mapsize = round_up(vaddr + ph->p_memsz) -
				    obj->vaddrbase;
			}
			++nsegs;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_LOAD", (void *)(uintptr_t)vaddr, ph->p_memsz));
			break;

		case PT_DYNAMIC:
			obj->dynamic = (Elf_Dyn *)(uintptr_t)vaddr;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_DYNAMIC", (void *)(uintptr_t)vaddr, ph->p_memsz));
			break;

#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
		case PT_TLS:
			/* main program always gets TLS index 1 */
			obj->tlsindex = 1;
			obj->tlssize = ph->p_memsz;
			obj->tlsalign = ph->p_align;
			obj->tlsinitsize = ph->p_filesz;
			/* NOTE(review): uses un-relocated p_vaddr here, unlike
			 * the other cases which use the relocated vaddr —
			 * confirm this is intended for the main program. */
			obj->tlsinit = (void *)(uintptr_t)ph->p_vaddr;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_TLS", (void *)(uintptr_t)vaddr, ph->p_memsz));
			break;
#endif
#ifdef __ARM_EABI__
		case PT_ARM_EXIDX:
			obj->exidx_start = (void *)(uintptr_t)vaddr;
			obj->exidx_sz = ph->p_memsz;
			dbg(("headers: %s %p phsize %" PRImemsz,
			    "PT_ARM_EXIDX", (void *)(uintptr_t)vaddr, ph->p_memsz));
			break;
#endif
		}
	}
	assert(nsegs == 2);

	obj->entry = entry;
	return obj;
}
/*
 * Dequeue pending xmit frames, aggregate frames of the same priority into a
 * single xmit buffer, and push the buffer out over the USB bulk-out pipe.
 * Returns true if a buffer was dumped, false if there was nothing to send
 * or no xmit buffer could be obtained.
 */
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
	struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;

	/* aggregate variable */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;

	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/* next pkt address */
	u32 pbuf_tail;	/* last pkt tail */
	u32 len;	/* packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = haldata->UsbBulkOutSize;
	u8 desc_cnt;
	u32 bulkptr;

	/* dump frame variable */
	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	/* check xmitbuffer is ok */
	if (pxmitbuf == NULL) {
		pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
		if (pxmitbuf == NULL)
			return false;
	}

	/* 3 1. pick up first frame */
	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
	if (pxmitframe == NULL) {
		/* no more xmit frame, release xmit buffer */
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		return false;
	}

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	pxmitframe->agg_num = 1; /* alloc xmitframe should assign to 1. */
	pxmitframe->pkt_offset = 1; /* first frame of aggregation, reserve offset */

	rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	/* always return ndis_packet after rtw_xmitframe_coalesce */
	rtw_os_xmit_complete(adapt, pxmitframe);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8); /* next frame starts 8-byte aligned */

	/* check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /* round to next bulksize */
	}

	/* dequeue same priority packet from station tx queue */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &(psta->sta_xmitpriv.bk_q);
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &(psta->sta_xmitpriv.vi_q);
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &(psta->sta_xmitpriv.vo_q);
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &(psta->sta_xmitpriv.be_q);
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	/* walk the per-station queue, appending frames until the buffer,
	 * aggregation count, or USB descriptor budget is exhausted */
	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /* not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);

		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			/* does not fit: restore first-frame markers and stop */
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}

		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/* always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/* don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/* handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == haldata->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);

	/* ARP / EAPOL / WAPI / DHCP frames are excluded from ADDBA requests */
	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);

	/* 3 3. update first frame txdesc */
	if ((pbuf_tail % bulksize) == 0) {
		/* remove pkt_offset so total length is not a bulk multiple
		 * (avoids the USB zero-length-packet case) */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);

	/* 3 5. update statisitc */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}
/*
 * Allocate (or map) the CCCI shared-memory region for modem @md_id and carve
 * it into per-service sub-regions (runtime data, exception, PCM, log, RPC,
 * FS, TTY/UART, IPC, CCMNI, ...), recording each base/size in
 * md_smem_tab[md_id].  The carve-out order is the layout contract with the
 * modem firmware — do not reorder.  Returns 0 on success, negative on error.
 */
int ccci_alloc_smem(int md_id)
{
	ccci_mem_layout_t *mem_layout_ptr = NULL;
	int ret = 0;
	int smem_size = 0;
	int *smem_vir;
	dma_addr_t smem_phy;
	/* base_virt/base_phy are running cursors through the region.
	 * NOTE(review): they are plain int — assumes 32-bit addresses;
	 * would truncate on a 64-bit build. */
	int i,j, base_virt, base_phy;
	int size;

	smem_size = cal_md_smem_size(md_id);

	ret = cfg_md_mem_layout(md_id);
	if(ret < 0) {
		CCCI_MSG_INF(md_id, "ctl", "md mem layout config fail\n");
		return ret;
	}

	mem_layout_ptr = &md_mem_layout_tab[md_id];

#ifdef CCCI_STATIC_SHARED_MEM
	/* static carve-out: map the pre-reserved physical region */
	if (md_mem_layout_tab[md_id].smem_region_size < smem_size) {
		CCCI_MSG_INF(md_id, "ctl", "[error]CCCI shared mem isn't enough: 0x%08X\n", smem_size);
		return -ENOMEM;
	}
	smem_phy = mem_layout_ptr->smem_region_phy;
	smem_vir = (int*)ioremap_nocache((unsigned long)smem_phy, smem_size);
	if (!smem_vir) {
		CCCI_MSG_INF(md_id, "ctl", "ccci smem ioremap fail\n");
		return -ENOMEM;
	}
#else // dynamic allocation shared memory
	smem_vir = dma_alloc_coherent(NULL, smem_size, &smem_phy, GFP_KERNEL);
	if (smem_vir == NULL) {
		CCCI_MSG_INF(md_id, "ctl", "ccci smem dma_alloc_coherent fail\n");
		return -CCCI_ERR_GET_MEM_FAIL;
	}
	mem_layout_ptr->smem_region_phy = (unsigned int)smem_phy;
	mem_layout_ptr->smem_region_size = smem_size;
#endif

	mem_layout_ptr->smem_region_vir = (unsigned int)smem_vir;
	CCCI_CTL_MSG(md_id, "ccci_smem_phy=%x, ccci_smem_size=%d, ccci_smem_virt=%x\n",
		(unsigned int)smem_phy, smem_size, (unsigned int)smem_vir);

	/* region base and size are expected to be 16KB (0x4000) aligned */
	WARN_ON(smem_phy&(0x4000-1)||smem_size&(0x4000-1));

	// Memory allocate done, config for each sub module
	base_virt = (int)smem_vir;
	base_phy = (int)smem_phy;

	// Total
	md_smem_tab[md_id].ccci_smem_vir = base_virt;
	md_smem_tab[md_id].ccci_smem_phy = base_phy;
	md_smem_tab[md_id].ccci_smem_size = smem_size;

	//MD runtime data!! Note: This item must be the first!!!
	md_smem_tab[md_id].ccci_md_runtime_data_smem_base_virt = base_virt;
	md_smem_tab[md_id].ccci_md_runtime_data_smem_base_phy = base_phy;
	md_smem_tab[md_id].ccci_md_runtime_data_smem_size = CCCI_MD_RUNTIME_DATA_SMEM_SIZE;
	base_virt += CCCI_MD_RUNTIME_DATA_SMEM_SIZE;
	base_phy += CCCI_MD_RUNTIME_DATA_SMEM_SIZE;

	// EXP
	md_smem_tab[md_id].ccci_exp_smem_base_virt = base_virt;
	md_smem_tab[md_id].ccci_exp_smem_base_phy = base_phy;
	md_smem_tab[md_id].ccci_exp_smem_size = MD_EX_LOG_SIZE;
	base_virt += MD_EX_LOG_SIZE;
	base_phy += MD_EX_LOG_SIZE;

	//MD Exception expand Info
	md_smem_tab[md_id].ccci_md_ex_exp_info_smem_base_virt = base_virt;
	md_smem_tab[md_id].ccci_md_ex_exp_info_smem_base_phy = base_phy;
	md_smem_tab[md_id].ccci_md_ex_exp_info_smem_size = CCCI_MD_EX_EXP_INFO_SMEM_SIZE;
	base_virt += CCCI_MD_EX_EXP_INFO_SMEM_SIZE;
	base_phy += CCCI_MD_EX_EXP_INFO_SMEM_SIZE;

	//Misc info
	md_smem_tab[md_id].ccci_misc_info_base_virt = base_virt;
	md_smem_tab[md_id].ccci_misc_info_base_phy = base_phy;
	md_smem_tab[md_id].ccci_misc_info_size = CCCI_MISC_INFO_SMEM_SIZE;
	base_virt += CCCI_MISC_INFO_SMEM_SIZE;
	base_phy += CCCI_MISC_INFO_SMEM_SIZE;

	/* page-align the cursor before the large 4KB-granular regions */
	base_virt = round_up(base_virt, 0x1000);
	base_phy = round_up(base_phy, 0x1000);

	// PCM
	md_smem_tab[md_id].ccci_pcm_smem_base_virt = base_virt;
	md_smem_tab[md_id].ccci_pcm_smem_base_phy = base_phy;
	md_smem_tab[md_id].ccci_pcm_smem_size = pcm_smem_size[md_id];
	size = round_up(pcm_smem_size[md_id], 0x1000);
	base_virt += size;
	base_phy += size;

	// LOG
	md_smem_tab[md_id].ccci_mdlog_smem_base_virt = base_virt;
	md_smem_tab[md_id].ccci_mdlog_smem_base_phy = base_phy;
	md_smem_tab[md_id].ccci_mdlog_smem_size = md_log_smem_size[md_id];
	size = round_up(md_log_smem_size[md_id], 0x1000);
	base_virt += size;
	base_phy += size;

	// RPC
	md_smem_tab[md_id].ccci_rpc_smem_base_virt = base_virt;
	md_smem_tab[md_id].ccci_rpc_smem_base_phy = base_phy;
	md_smem_tab[md_id].ccci_rpc_smem_size = rpc_smem_size[md_id];
	size = round_up(rpc_smem_size[md_id], 0x1000);
	base_virt += size;
	base_phy += size;

	// FS
	md_smem_tab[md_id].ccci_fs_smem_base_virt = base_virt;
	md_smem_tab[md_id].ccci_fs_smem_base_phy = base_phy;
	md_smem_tab[md_id].ccci_fs_smem_size = CCCI_FS_SMEM_SIZE;
	size = round_up(CCCI_FS_SMEM_SIZE, 0x1000);
	base_virt += size;
	base_phy += size;

	// TTY: tty_meta(uart0), tty_muxd(uart1), ccmni1(uart2), ccmni2(uart3), ccmni3(uart4), tty_ipc(uart5)
	j = 0;
	/* plain TTY ports first (all but the IPC one) */
	for (i = 0; i < CCCI1_TTY_PORT_NUM-1; i++, j++) {
		md_smem_tab[md_id].ccci_uart_smem_base_virt[i] = base_virt;
		md_smem_tab[md_id].ccci_uart_smem_base_phy[i] = base_phy;
		md_smem_tab[md_id].ccci_uart_smem_size[i] = tty_smem_size[md_id];
		base_virt += tty_smem_size[md_id];
		base_phy += tty_smem_size[md_id];
	}

	/* CCMNI v1 ports share the uart slot array, continuing at index j */
	for (i = 0; i < CCMNI_V1_PORT_NUM; i++, j++) {
		if(net_v1_smem_size[md_id] == 0) {
			md_smem_tab[md_id].ccci_uart_smem_base_virt[j] = 0;
			md_smem_tab[md_id].ccci_uart_smem_base_phy[j] = 0;
			md_smem_tab[md_id].ccci_uart_smem_size[j] = 0;
		} else {
			md_smem_tab[md_id].ccci_uart_smem_base_virt[j] = base_virt;
			md_smem_tab[md_id].ccci_uart_smem_base_phy[j] = base_phy;
			md_smem_tab[md_id].ccci_uart_smem_size[j] = net_v1_smem_size[md_id];
			base_virt += net_v1_smem_size[md_id];
			base_phy += net_v1_smem_size[md_id];
		}
	}

	md_smem_tab[md_id].ccci_uart_smem_base_virt[j] = base_virt; // TTY for IPC
	md_smem_tab[md_id].ccci_uart_smem_base_phy[j] = base_phy;
	md_smem_tab[md_id].ccci_uart_smem_size[j] = tty_smem_size[md_id];
	base_virt += tty_smem_size[md_id];
	base_phy += tty_smem_size[md_id];
	j++;

	/* zero out any remaining unused uart slots */
	for (; j < CCCI_UART_PORT_NUM; j++) {
		md_smem_tab[md_id].ccci_uart_smem_base_virt[j] = 0;
		md_smem_tab[md_id].ccci_uart_smem_base_phy[j] = 0;
		md_smem_tab[md_id].ccci_uart_smem_size[j] = 0;
	}

	// PMIC (not used on this platform; kept zeroed)
	md_smem_tab[md_id].ccci_pmic_smem_base_virt = 0;
	md_smem_tab[md_id].ccci_pmic_smem_base_phy = 0;
	md_smem_tab[md_id].ccci_pmic_smem_size = 0;
	base_virt += 0;
	base_phy += 0;

	// SYS (not used on this platform; kept zeroed)
	md_smem_tab[md_id].ccci_sys_smem_base_virt = 0;
	md_smem_tab[md_id].ccci_sys_smem_base_phy = 0;
	md_smem_tab[md_id].ccci_sys_smem_size = 0;
	base_virt += 0;
	base_phy += 0;

	//IPC
	md_smem_tab[md_id].ccci_ipc_smem_base_virt = base_virt;
	md_smem_tab[md_id].ccci_ipc_smem_base_phy = base_phy;
	md_smem_tab[md_id].ccci_ipc_smem_size = CCCI_IPC_SMEM_SIZE;
	base_virt += CCCI_IPC_SMEM_SIZE;
	base_phy += CCCI_IPC_SMEM_SIZE;

	// CCMNI_V2 -- UP-Link
	if(net_smem_ul_size[md_id] != 0) {
		md_smem_tab[md_id].ccci_ccmni_smem_ul_base_virt = base_virt;
		md_smem_tab[md_id].ccci_ccmni_smem_ul_base_phy = base_phy;
		md_smem_tab[md_id].ccci_ccmni_smem_ul_size = net_smem_ul_size[md_id];
		base_virt += net_smem_ul_size[md_id];
		base_phy += net_smem_ul_size[md_id];
	} else {
		md_smem_tab[md_id].ccci_ccmni_smem_ul_base_virt = 0;
		md_smem_tab[md_id].ccci_ccmni_smem_ul_base_phy = 0;
		md_smem_tab[md_id].ccci_ccmni_smem_ul_size = 0;
	}

	// CCMNI_V2 --DOWN-Link
	if(net_smem_dl_size[md_id] != 0) {
		md_smem_tab[md_id].ccci_ccmni_smem_dl_base_virt = base_virt;
		md_smem_tab[md_id].ccci_ccmni_smem_dl_base_phy = base_phy;
		md_smem_tab[md_id].ccci_ccmni_smem_dl_size = net_smem_dl_size[md_id];
		base_virt += net_smem_dl_size[md_id];
		base_phy += net_smem_dl_size[md_id];
	} else {
		md_smem_tab[md_id].ccci_ccmni_smem_dl_base_virt = 0;
		md_smem_tab[md_id].ccci_ccmni_smem_dl_base_phy = 0;
		md_smem_tab[md_id].ccci_ccmni_smem_dl_size = 0;
	}

	// CCMNI_V2 --Ctrl memory
	for (i = 0; i < CCMNI_V2_PORT_NUM; i++) {
		if (net_v2_smem_size[md_id] == 0) {
			md_smem_tab[md_id].ccci_ccmni_ctl_smem_base_virt[i] = 0;
			md_smem_tab[md_id].ccci_ccmni_ctl_smem_base_phy[i] = 0;
			md_smem_tab[md_id].ccci_ccmni_ctl_smem_size[i] = 0;
		} else {
			md_smem_tab[md_id].ccci_ccmni_ctl_smem_base_virt[i] = base_virt;
			md_smem_tab[md_id].ccci_ccmni_ctl_smem_base_phy[i] = base_phy;
			md_smem_tab[md_id].ccci_ccmni_ctl_smem_size[i] = net_v2_smem_size[md_id];
		}
		/* NOTE(review): memset runs even in the size==0 branch (it is
		 * then a 0-length no-op); cursors also advance by 0 there. */
		memset((void*)base_virt, 0, net_v2_smem_size[md_id]);
		base_virt += net_v2_smem_size[md_id];
		base_phy += net_v2_smem_size[md_id];
	}

	return ret;
}
/*
 * Parse the boot CPIO archive: read the "menu.lst.modules" command-line file,
 * then for each subsequent file in the archive copy it into freshly allocated
 * RAM, record it as a Module region in @bi, and create one DevFrame capability
 * per page in the module cnode.  Panics if the command-line file or any listed
 * module is missing.
 */
static void create_modules_from_initrd(struct bootinfo* bi,
                                       const uint8_t* initrd_base,
                                       size_t initrd_bytes)
{
    errval_t err;
    lvaddr_t mmstrings_base = 0;
    lvaddr_t mmstrings = 0;

    // CPIO archive is crafted such that first file is
    // command-line strings for "modules" - ie menu.lst. The
    // subsequent file follow in the order they appear in
    // menu.lst.arm.
    const uint8_t* data;
    size_t bytes;

    if (cpio_get_file_by_name(initrd_base, initrd_bytes,
                              "menu.lst.modules",
                              &data, &bytes)) {
        /* strictly less than a page: one extra byte is written below
         * for the NUL terminator */
        assert(bytes < BASE_PAGE_SIZE);
        mmstrings_base = alloc_mem(BASE_PAGE_SIZE);
        mmstrings = mmstrings_base;

        STARTUP_PROGRESS();

        // Create cap for strings area in first slot of modulecn
        err = caps_create_new(
            ObjType_Frame,
            mem_to_local_phys(mmstrings_base),
            BASE_PAGE_BITS, BASE_PAGE_BITS,
            my_core_id,
            caps_locate_slot(
                CNODE(spawn_state.modulecn),
                spawn_state.modulecn_slot++)
            );
        assert(err_is_ok(err));

        STARTUP_PROGRESS();

        // Copy strings from file into allocated page
        memcpy((void*)mmstrings_base, data, bytes);
        ((char*)mmstrings_base)[bytes] = '\0';

        STARTUP_PROGRESS();

        // Skip first line (corresponds to bootscript in archive)
        strtok((char*)mmstrings_base, "\r\n");

        STARTUP_PROGRESS();

        assert(bi->regions_length == 0);
        int ord = 1;
        const char* name;
        // each remaining menu.lst line pairs with the archive file at
        // the same ordinal position
        while ((mmstrings = (lvaddr_t)strtok(NULL, "\r\n")) != 0) {
            if (!cpio_get_file_by_ordinal(initrd_base, initrd_bytes,
                                          ord, &name, &data, &bytes)) {
                panic("Failed to find file\n");
            }
            ord++;

            debug(SUBSYS_STARTUP,
                  "Creating caps for \"%s\" (Command-line \"%s\")\n",
                  name, (char*)mmstrings);

            // Copy file from archive into RAM.
            // TODO: Give up archive space.
            size_t pa_bytes = round_up(bytes, BASE_PAGE_SIZE);
            lpaddr_t pa = alloc_phys(pa_bytes);
            memcpy((void*)local_phys_to_mem(pa), data, bytes);

            struct mem_region* region = &bi->regions[bi->regions_length++];
            region->mr_type = RegionType_Module;
            region->mrmod_slot = spawn_state.modulecn_slot;
            region->mrmod_size = pa_bytes;
            // offset of this module's command line within the strings page
            region->mrmod_data = mmstrings - mmstrings_base;

            assert((pa & BASE_PAGE_MASK) == 0);
            assert((pa_bytes & BASE_PAGE_MASK) == 0);

            // one DevFrame cap per page of the module
            while (pa_bytes != 0) {
                assert(spawn_state.modulecn_slot
                       < (1UL << spawn_state.modulecn->cap.u.cnode.bits));
                // create as DevFrame cap to avoid zeroing memory contents
                err = caps_create_new(
                    ObjType_DevFrame, pa, BASE_PAGE_BITS,
                    BASE_PAGE_BITS,
                    my_core_id,
                    caps_locate_slot(
                        CNODE(spawn_state.modulecn),
                        spawn_state.modulecn_slot++)
                    );
                assert(err_is_ok(err));

                pa += BASE_PAGE_SIZE;
                pa_bytes -= BASE_PAGE_SIZE;
            }
        }
    } else {
        panic("No command-line file.\n");
    }
}
int main ( int argc, char ** argv ) { int init_address=-1; int max_align = 0; unsigned long curr_size = 0; bfd *obj_bfd = NULL; bfd_error_type myerr; unsigned u = 0, v = 0; asymbol **q = NULL; asection *s = NULL; static struct bfd_link_callbacks link_callbacks; static struct bfd_link_order link_order; void *current = NULL; void *cfd_self = NULL; void *cfd_start = NULL; int cfd_size = 0; void *the_start = NULL; void *start_address = NULL; void *m = NULL; fprintf ( stderr, "In BFD fast load test. Reloc_howto_type size %d\n", sizeof ( rhtt ) ); if ( argc < 3 ) { fprintf ( stderr, "Need an executable (eg raw_gcl.exe) and an object file as arguments.\n" ); } else { memset ( &link_info, 0, sizeof (link_info) ); memset ( &link_order, 0, sizeof (link_order) ); memset ( &link_callbacks, 0, sizeof (link_callbacks) ); fprintf ( stderr, "BUILDING EXECUTABLE SYMBOL TABLE FOR %s \n", argv[1] ); build_symbol_table_bfd ( argv[1] ); link_callbacks.add_archive_element=madd_archive_element; link_callbacks.multiple_definition=mmultiple_definition; link_callbacks.multiple_common=mmultiple_common; link_callbacks.add_to_set=madd_to_set; link_callbacks.constructor=mconstructor; link_callbacks.warning=mwarning; link_callbacks.undefined_symbol=mundefined_symbol; link_callbacks.reloc_overflow=mreloc_overflow; link_callbacks.reloc_dangerous=mreloc_dangerous; link_callbacks.unattached_reloc=munattached_reloc; link_callbacks.notice = mnotice; link_info.callbacks = &link_callbacks; link_order.type = bfd_indirect_link_order; fprintf ( stderr, "OPENING OBJECT FILE %s\n", argv[2] ); if ( ! ( obj_bfd = bfd_openr ( argv[2], 0 ) ) ) { fprintf ( stderr, "Cannot open bfd.\n" ); } if ( ( myerr = bfd_get_error () ) && myerr != 3 ) { fprintf ( stderr, "Unknown bfd error code on openr %s %d\n.", argv[2], myerr ); } fflush ( stderr ); if ( ! 
bfd_check_format ( obj_bfd, bfd_object ) ) { fprintf ( stderr, "Unknown bfd format %s.\n", argv[2] ); } if ( ( myerr = bfd_get_error () ) && myerr != 3 ) { fprintf ( stderr, "Unknown bfd error code on check_format %s\n", argv[2] ); } bfd_set_error(0); current = NULL; fprintf ( stderr, "CALCULATING CURRENT, MAX_ALIGN and ALLOCATING \n\n" ); for ( s= obj_bfd->sections;s;s=s->next) { s->owner = obj_bfd; s->output_section = ( s->flags & SEC_ALLOC) ? s : obj_bfd->sections; s->output_offset=0; if (!(s->flags & SEC_ALLOC)) continue; if (max_align<s->alignment_power) max_align=s->alignment_power; current=round_up(current,1<<s->alignment_power); current+=s->_raw_size; fprintf ( stderr, "Section %s: owner = %x, output_offset = %x, " "output_section = %x (%s)\n", s->name, s->owner, s->output_offset, s->output_section, s->output_section->name ); } fprintf ( stderr, "1\n"); curr_size=(unsigned long)current; max_align=1<<max_align; cfd_self = 0; cfd_start = 0; cfd_size = curr_size + (max_align > sizeof(char *) ? 
max_align :0); cfd_start = (void *) malloc ( cfd_size ); the_start = start_address = cfd_start; fprintf ( stderr, "ALLOCATED %d bytes \n\n", cfd_size ); fprintf ( stderr, "max_align = %d, current = %d, cfd_self = %x, " "cfd_size = %x, cfd_start = %x\n", max_align, current, cfd_self, cfd_size, cfd_start ); start_address = ROUND_UP ( start_address, max_align ); cfd_size = cfd_size - ( start_address - the_start ); cfd_start = (void *) start_address; fprintf ( stderr, "max_align = %d, current = %d, cfd_self = %x, " "cfd_size = %x, cfd_start = %x\n", max_align, current, cfd_self, cfd_size, cfd_start ); memset ( cfd_start, 0, cfd_size ); for ( m = start_address, s = obj_bfd->sections; s; s=s->next ) { if (!(s->flags & SEC_ALLOC)) continue; m=round_up(m,1<<s->alignment_power); s->output_section->vma=(unsigned long)m; m+=s->_raw_size; fprintf ( stderr, "Section address %x\n", s ); fprintf ( stderr, "m loop Section %s: owner = %x, output_offset = %x, " "output_section = %x (%s), vma = %x, m = %x\n", s->name, s->owner, s->output_offset, s->output_section, s->output_section->name, s->output_section->vma, m ); } fprintf ( stderr, "\n\nDOING SOMETHING WITH THE HASHED SYMBOLS\n\n" ); if ((u=bfd_get_symtab_upper_bound(obj_bfd))<0) fprintf ( stderr, "Cannot get symtab uppoer bound.\n" ); q = (asymbol **) alloca ( u ); if ( ( v = bfd_canonicalize_symtab ( obj_bfd, q ) ) < 0 ) fprintf ( stderr, "cannot canonicalize symtab.\n" ); fprintf ( stderr, "u = %d, v = %d\n", u, v ); for (u=0;u<v;u++) { struct bfd_link_hash_entry *h; fprintf ( stderr, "u loop q[%d]->name = %s\n", u, q[u]->name ); if (!strncmp("init_",q[u]->name,5)) { init_address=q[u]->value; continue; } if (!(h=bfd_link_hash_lookup(link_info.hash,q[u]->name, MY_BFD_FALSE, MY_BFD_FALSE, MY_BFD_TRUE))) continue; if (h->type!=bfd_link_hash_defined) fprintf ( stderr, "Undefined symbol.\n" ); if (h->u.def.section) { q[u]->value=h->u.def.value+h->u.def.section->vma; q[u]->flags|=BSF_WEAK; } else fprintf ( stderr, "Symbol without 
section.\n" ); } fprintf ( stderr, "\n\nDOING RELOCATIONS\n\n", cfd_size ); fflush ( stderr ); for ( s = obj_bfd->sections; s; s = s->next ) { fprintf ( stderr, "s->name %s, s->flags = %x\n", s->name, s->flags ); if ( ! ( s->flags & SEC_LOAD ) ) continue; link_order.u.indirect.section=s; fprintf ( stderr, "About to get reloc section contents\n" ); fprintf ( stderr, "obj_bfd = %x, section %s, s->output_section = %x, q = %x\n", obj_bfd, s->name, s->output_section, q); fflush ( stderr ); if (!bfd_get_relocated_section_contents(obj_bfd, &link_info,&link_order, (void *)(unsigned long)s->output_section->vma,0,q)) fprintf ( stderr, "Cannot get relocated section contents\n"); } bfd_close ( obj_bfd ); printf("start address -T %x \n", cfd_start); } }
static void collect_md_settings(void) { unsigned int tmp; unsigned int md1_en = 0; unsigned int md2_en = 0; unsigned int md3_en = 0; unsigned int md5_en = 0; md_usage_case = 0; printk("[ccci] collect_md_settings\n"); // MTK_ENABLE_MD* if(ccci_get_fo_setting("MTK_ENABLE_MD1", &tmp) == 0) { if(tmp > 0) md1_en = 1; } if(ccci_get_fo_setting("MTK_ENABLE_MD2", &tmp) == 0) { if(tmp > 0) md2_en = 1; } if(ccci_get_fo_setting("MTK_ENABLE_MD3", &tmp) == 0) { if(tmp > 0) md3_en = 1; } if(ccci_get_fo_setting("MTK_ENABLE_MD5", &tmp) == 0) { if(tmp > 0) md5_en = 1; } // MTK_MD*_SUPPORT if(ccci_get_fo_setting("MTK_MD1_SUPPORT", &tmp) == 0) { md_support[MD_SYS1] = tmp; } if(ccci_get_fo_setting("MTK_MD2_SUPPORT", &tmp) == 0) { md_support[MD_SYS2] = tmp; } if(ccci_get_fo_setting("MTK_MD3_SUPPORT", &tmp) == 0) { md_support[MD_SYS3] = tmp; } if(ccci_get_fo_setting("MTK_MD5_SUPPORT", &tmp) == 0) { md_support[MD_SYS5] = tmp; } // MD*_SIZE /* * for legacy CCCI: make share memory start address to be 2MB align, as share * memory size is 2MB - requested by MD MPU. * for ECCCI: ROM+RAM size will be align to 1M, and share memory is 2K, * 1M alignment is also 2K alignment. 
*/ if(ccci_get_fo_setting("MD1_SIZE", &tmp) == 0) { tmp = round_up(tmp, get_md_smem_align(MD_SYS1)); md_resv_mem_size[MD_SYS1] = tmp; } if(ccci_get_fo_setting("MD2_SIZE", &tmp) == 0) { tmp = round_up(tmp, get_md_smem_align(MD_SYS2)); md_resv_mem_size[MD_SYS2] = tmp; } if(ccci_get_fo_setting("MD3_SIZE", &tmp) == 0) { tmp = round_up(tmp, get_md_smem_align(MD_SYS3)); md_resv_mem_size[MD_SYS3] = tmp; } // MD*_SMEM_SIZE #if 0 if(ccci_get_fo_setting("MD1_SMEM_SIZE", &tmp) == 0) { md_resv_smem_size[MD_SYS1] = tmp; } #else md_resv_smem_size[MD_SYS1] = 2*1024*1024; #endif #if 0 if(ccci_get_fo_setting("MD2_SMEM_SIZE", &tmp) == 0) { md_resv_smem_size[MD_SYS2] = tmp; } #else md_resv_smem_size[MD_SYS2] = 4*1024*1024; #endif if(ccci_get_fo_setting("MD3_SMEM_SIZE", &tmp) == 0) { md_resv_smem_size[MD_SYS3] = tmp; } // Setting conflict checking if(md1_en && (md_resv_smem_size[MD_SYS1]>0) && (md_resv_mem_size[MD_SYS1]>0)) { // Setting is OK } else if (md1_en && ((md_resv_smem_size[MD_SYS1]<=0) || (md_resv_mem_size[MD_SYS1]<=0))) { CCCI_UTIL_ERR_MSG_WITH_ID(MD_SYS1,"FO Setting for md1 wrong: <%d:0x%08X:0x%08X>\n", md1_en, md_resv_mem_size[MD_SYS1], md_resv_smem_size[MD_SYS1]); md1_en = 0; md_resv_smem_size[MD_SYS1] = 0; md_resv_mem_size[MD_SYS1] = 0; } if(md2_en && (md_resv_smem_size[MD_SYS2]>0) && (md_resv_mem_size[MD_SYS2]>0)) { // Setting is OK } else if (md2_en && ((md_resv_smem_size[MD_SYS2]<=0) || (md_resv_mem_size[MD_SYS2]<=0))) { CCCI_UTIL_ERR_MSG_WITH_ID(MD_SYS2,"FO Setting for md2 wrong: <%d:0x%08X:0x%08X>\n", md2_en, md_resv_mem_size[MD_SYS2], md_resv_smem_size[MD_SYS2]); md2_en = 0; md_resv_smem_size[MD_SYS2] = 0; md_resv_mem_size[MD_SYS2] = 0; } if(md3_en && (md_resv_smem_size[MD_SYS3]>0) && (md_resv_mem_size[MD_SYS3]>0)) { // Setting is OK } else if (md3_en && ((md_resv_smem_size[MD_SYS3]<=0) || (md_resv_mem_size[MD_SYS3]<=0))) { CCCI_UTIL_ERR_MSG_WITH_ID(MD_SYS3,"FO Setting for md3 wrong: <%d:0x%08X:0x%08X>\n", md3_en, md_resv_mem_size[MD_SYS3], 
md_resv_smem_size[MD_SYS3]); md3_en = 0; md_resv_smem_size[MD_SYS2] = 0; md_resv_mem_size[MD_SYS2] = 0; } if(md1_en) { md_usage_case |= MD1_EN; modem_num++; } if(md2_en) { md_usage_case |= MD2_EN; modem_num++; } if(md3_en) { md_usage_case |= MD3_EN; modem_num++; } if(md5_en) { md_usage_case |= MD5_EN; modem_num++; } memory_layout_cal(MEM_LAY_OUT_VER); }
// Pick up items at (pos). void Pickup::pick_up( const tripoint &p, int min ) { int cargo_part = -1; const optional_vpart_position vp = g->m.veh_at( p ); vehicle *const veh = veh_pointer_or_null( vp ); bool from_vehicle = false; if( min != -1 ) { veh_interact_results get_items_from = ITEMS_FROM_GROUND; if( veh != nullptr ) { get_items_from = veh->interact_with( p, vp->part_index() ); } switch( get_items_from ) { case DONE: return; case ITEMS_FROM_CARGO: { const cata::optional<vpart_reference> carg = vp.part_with_feature( "CARGO", false ); cargo_part = carg ? carg->part_index() : -1; from_vehicle = cargo_part >= 0; break; } case ITEMS_FROM_GROUND: // Nothing to change, default is to pick from ground anyway. if( g->m.has_flag( "SEALED", p ) ) { return; } break; } } if( !from_vehicle ) { bool isEmpty = ( g->m.i_at( p ).empty() ); // Hide the pickup window if this is a toilet and there's nothing here // but water. if( ( !isEmpty ) && g->m.furn( p ) == f_toilet ) { isEmpty = true; for( const item &maybe_water : g->m.i_at( p ) ) { if( maybe_water.typeId() != "water" ) { isEmpty = false; break; } } } if( isEmpty && ( min != -1 || !get_option<bool>( "AUTO_PICKUP_ADJACENT" ) ) ) { return; } } // which items are we grabbing? std::vector<item> here; if( from_vehicle ) { auto vehitems = veh->get_items( cargo_part ); here.resize( vehitems.size() ); std::copy( vehitems.begin(), vehitems.end(), here.begin() ); } else { auto mapitems = g->m.i_at( p ); here.resize( mapitems.size() ); std::copy( mapitems.begin(), mapitems.end(), here.begin() ); } if( min == -1 ) { // Recursively pick up adjacent items if that option is on. 
if( get_option<bool>( "AUTO_PICKUP_ADJACENT" ) && g->u.pos() == p ) { //Autopickup adjacent direction adjacentDir[8] = {NORTH, NORTHEAST, EAST, SOUTHEAST, SOUTH, SOUTHWEST, WEST, NORTHWEST}; for( auto &elem : adjacentDir ) { tripoint apos = tripoint( direction_XY( elem ), 0 ); apos += p; pick_up( apos, min ); } } // Bail out if this square cannot be auto-picked-up if( g->check_zone( zone_type_id( "NO_AUTO_PICKUP" ), p ) ) { return; } else if( g->m.has_flag( "SEALED", p ) ) { return; } } // Not many items, just grab them if( static_cast<int>( here.size() ) <= min && min != -1 ) { g->u.assign_activity( activity_id( "ACT_PICKUP" ) ); g->u.activity.placement = p - g->u.pos(); g->u.activity.values.push_back( from_vehicle ); // Only one item means index is 0. g->u.activity.values.push_back( 0 ); // auto-pickup means pick up all. g->u.activity.values.push_back( 0 ); return; } std::vector<std::list<item_idx>> stacked_here; for( size_t i = 0; i < here.size(); i++ ) { item &it = here[i]; bool found_stack = false; for( auto &stack : stacked_here ) { if( stack.begin()->_item.stacks_with( it ) ) { item_idx el = { it, i }; stack.push_back( el ); found_stack = true; break; } } if( !found_stack ) { std::list<item_idx> newstack; newstack.push_back( { it, i } ); stacked_here.push_back( newstack ); } } std::reverse( stacked_here.begin(), stacked_here.end() ); if( min != -1 ) { // don't bother if we're just autopickuping g->temp_exit_fullscreen(); } // Otherwise, we have Autopickup, 2 or more items and should list them, etc. int maxmaxitems = TERMY; int itemsH = std::min( 25, TERMY / 2 ); int pickupBorderRows = 3; // The pickup list may consume the entire terminal, minus space needed for its // header/footer and the item info window. 
int minleftover = itemsH + pickupBorderRows; if( maxmaxitems > TERMY - minleftover ) { maxmaxitems = TERMY - minleftover; } const int minmaxitems = 9; std::vector<pickup_count> getitem( stacked_here.size() ); int maxitems = stacked_here.size(); maxitems = ( maxitems < minmaxitems ? minmaxitems : ( maxitems > maxmaxitems ? maxmaxitems : maxitems ) ); int itemcount = 0; if( min == -1 ) { //Auto Pickup, select matching items if( !select_autopickup_items( stacked_here, getitem ) ) { // If we didn't find anything, bail out now. return; } } else { int pickupH = maxitems + pickupBorderRows; int pickupW = 44; int itemsW = pickupW; catacurses::window w_pickup = catacurses::newwin( pickupH, pickupW, 0, 0 ); catacurses::window w_item_info = catacurses::newwin( TERMY - pickupH, pickupW, pickupH, 0 ); std::string action; long raw_input_char = ' '; input_context ctxt( "PICKUP" ); ctxt.register_action( "UP" ); ctxt.register_action( "DOWN" ); ctxt.register_action( "RIGHT" ); ctxt.register_action( "LEFT" ); ctxt.register_action( "NEXT_TAB", _( "Next page" ) ); ctxt.register_action( "PREV_TAB", _( "Previous page" ) ); ctxt.register_action( "SCROLL_UP" ); ctxt.register_action( "SCROLL_DOWN" ); ctxt.register_action( "CONFIRM" ); ctxt.register_action( "SELECT_ALL" ); ctxt.register_action( "QUIT", _( "Cancel" ) ); ctxt.register_action( "ANY_INPUT" ); ctxt.register_action( "HELP_KEYBINDINGS" ); ctxt.register_action( "FILTER" ); #if defined(__ANDROID__) ctxt.allow_text_entry = true; // allow user to specify pickup amount #endif int start = 0; int cur_it = 0; bool update = true; mvwprintw( w_pickup, 0, 0, _( "PICK" ) ); int selected = 0; int iScrollPos = 0; std::string filter; std::string new_filter; std::vector<int> matches;//Indexes of items that match the filter bool filter_changed = true; if( g->was_fullscreen ) { g->draw_ter(); } // Now print the two lists; those on the ground and about to be added to inv // Continue until we hit return or space do { const std::string pickup_chars = 
ctxt.get_available_single_char_hotkeys( "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ:;" ); int idx = -1; for( int i = 1; i < pickupH; i++ ) { mvwprintw( w_pickup, i, 0, " " ); } if( action == "ANY_INPUT" && raw_input_char >= '0' && raw_input_char <= '9' ) { int raw_input_char_value = static_cast<char>( raw_input_char ) - '0'; itemcount *= 10; itemcount += raw_input_char_value; if( itemcount < 0 ) { itemcount = 0; } } else if( action == "SCROLL_UP" ) { iScrollPos--; } else if( action == "SCROLL_DOWN" ) { iScrollPos++; } else if( action == "PREV_TAB" ) { if( start > 0 ) { start -= maxitems; } else { start = static_cast<int>( ( matches.size() - 1 ) / maxitems ) * maxitems; } selected = start; mvwprintw( w_pickup, maxitems + 2, 0, " " ); } else if( action == "NEXT_TAB" ) { if( start + maxitems < static_cast<int>( matches.size() ) ) { start += maxitems; } else { start = 0; } iScrollPos = 0; selected = start; mvwprintw( w_pickup, maxitems + 2, pickupH, " " ); } else if( action == "UP" ) { selected--; iScrollPos = 0; if( selected < 0 ) { selected = matches.size() - 1; start = static_cast<int>( matches.size() / maxitems ) * maxitems; if( start >= static_cast<int>( matches.size() ) ) { start -= maxitems; } } else if( selected < start ) { start -= maxitems; } } else if( action == "DOWN" ) { selected++; iScrollPos = 0; if( selected >= static_cast<int>( matches.size() ) ) { selected = 0; start = 0; } else if( selected >= start + maxitems ) { start += maxitems; } } else if( selected >= 0 && selected < static_cast<int>( matches.size() ) && ( ( action == "RIGHT" && !getitem[matches[selected]].pick ) || ( action == "LEFT" && getitem[matches[selected]].pick ) ) ) { idx = selected; } else if( action == "FILTER" ) { new_filter = filter; string_input_popup popup; popup .title( _( "Set filter" ) ) .width( 30 ) .edit( new_filter ); if( !popup.canceled() ) { filter_changed = true; } else { wrefresh( g->w_terrain ); g->draw_panels(); } } else if( action == "ANY_INPUT" && 
raw_input_char == '`' ) { std::string ext = string_input_popup() .title( _( "Enter 2 letters (case sensitive):" ) ) .width( 3 ) .max_length( 2 ) .query_string(); if( ext.size() == 2 ) { int p1 = pickup_chars.find( ext.at( 0 ) ); int p2 = pickup_chars.find( ext.at( 1 ) ); if( p1 != -1 && p2 != -1 ) { idx = pickup_chars.size() + ( p1 * pickup_chars.size() ) + p2; } } } else if( action == "ANY_INPUT" ) { idx = ( raw_input_char <= 127 ) ? pickup_chars.find( raw_input_char ) : -1; iScrollPos = 0; } if( idx >= 0 && idx < static_cast<int>( matches.size() ) ) { size_t true_idx = matches[idx]; if( itemcount != 0 || getitem[true_idx].count == 0 ) { item &temp = stacked_here[true_idx].begin()->_item; int amount_available = temp.count_by_charges() ? temp.charges : stacked_here[true_idx].size(); if( itemcount >= amount_available ) { itemcount = 0; } getitem[true_idx].count = itemcount; itemcount = 0; } // Note: this might not change the value of getitem[idx] at all! getitem[true_idx].pick = ( action == "RIGHT" ? true : ( action == "LEFT" ? 
false : !getitem[true_idx].pick ) ); if( action != "RIGHT" && action != "LEFT" ) { selected = idx; start = static_cast<int>( idx / maxitems ) * maxitems; } if( !getitem[true_idx].pick ) { getitem[true_idx].count = 0; } update = true; } if( filter_changed ) { matches.clear(); while( matches.empty() ) { auto filter_func = item_filter_from_string( new_filter ); for( size_t index = 0; index < stacked_here.size(); index++ ) { if( filter_func( stacked_here[index].begin()->_item ) ) { matches.push_back( index ); } } if( matches.empty() ) { popup( _( "Your filter returned no results" ) ); wrefresh( g->w_terrain ); g->draw_panels(); // The filter must have results, or simply be emptied or canceled, // as this screen can't be reached without there being // items available string_input_popup popup; popup .title( _( "Set filter" ) ) .width( 30 ) .edit( new_filter ); if( popup.canceled() ) { new_filter = filter; filter_changed = false; } } } if( filter_changed ) { filter = new_filter; filter_changed = false; selected = 0; start = 0; iScrollPos = 0; } wrefresh( g->w_terrain ); g->draw_panels(); } item &selected_item = stacked_here[matches[selected]].begin()->_item; werase( w_item_info ); if( selected >= 0 && selected <= static_cast<int>( stacked_here.size() ) - 1 ) { std::vector<iteminfo> vThisItem; std::vector<iteminfo> vDummy; selected_item.info( true, vThisItem ); draw_item_info( w_item_info, "", "", vThisItem, vDummy, iScrollPos, true, true ); } draw_custom_border( w_item_info, 0 ); mvwprintw( w_item_info, 0, 2, "< " ); trim_and_print( w_item_info, 0, 4, itemsW - 8, c_white, "%s >", selected_item.display_name() ); wrefresh( w_item_info ); if( action == "SELECT_ALL" ) { int count = 0; for( auto i : matches ) { if( getitem[i].pick ) { count++; } getitem[i].pick = true; } if( count == static_cast<int>( stacked_here.size() ) ) { for( size_t i = 0; i < stacked_here.size(); i++ ) { getitem[i].pick = false; } } update = true; } for( cur_it = start; cur_it < start + maxitems; 
cur_it++ ) { mvwprintw( w_pickup, 1 + ( cur_it % maxitems ), 0, " " ); if( cur_it < static_cast<int>( matches.size() ) ) { int true_it = matches[cur_it]; item &this_item = stacked_here[ true_it ].begin()->_item; nc_color icolor = this_item.color_in_inventory(); if( cur_it == selected ) { icolor = hilite( c_white ); } if( cur_it < static_cast<int>( pickup_chars.size() ) ) { mvwputch( w_pickup, 1 + ( cur_it % maxitems ), 0, icolor, static_cast<char>( pickup_chars[cur_it] ) ); } else if( cur_it < static_cast<int>( pickup_chars.size() ) + static_cast<int> ( pickup_chars.size() ) * static_cast<int>( pickup_chars.size() ) ) { int p = cur_it - pickup_chars.size(); int p1 = p / pickup_chars.size(); int p2 = p % pickup_chars.size(); mvwprintz( w_pickup, 1 + ( cur_it % maxitems ), 0, icolor, "`%c%c", static_cast<char>( pickup_chars[p1] ), static_cast<char>( pickup_chars[p2] ) ); } else { mvwputch( w_pickup, 1 + ( cur_it % maxitems ), 0, icolor, ' ' ); } if( getitem[true_it].pick ) { if( getitem[true_it].count == 0 ) { wprintz( w_pickup, c_light_blue, " + " ); } else { wprintz( w_pickup, c_light_blue, " # " ); } } else { wprintw( w_pickup, " - " ); } std::string item_name; if( stacked_here[true_it].begin()->_item.ammo_type() == "money" ) { //Count charges // TODO: transition to the item_location system used for the inventory unsigned long charges_total = 0; for( const auto &item : stacked_here[true_it] ) { charges_total += item._item.charges; } //Picking up none or all the cards in a stack if( !getitem[true_it].pick || getitem[true_it].count == 0 ) { item_name = stacked_here[true_it].begin()->_item.display_money( stacked_here[true_it].size(), charges_total ); } else { unsigned long charges = 0; int c = getitem[true_it].count; for( auto it = stacked_here[true_it].begin(); it != stacked_here[true_it].end() && c > 0; ++it, --c ) { charges += it->_item.charges; } item_name = string_format( _( "%s of %s" ), stacked_here[true_it].begin()->_item.display_money( 
getitem[true_it].count, charges ), format_money( charges_total ) ); } } else { item_name = this_item.display_name( stacked_here[true_it].size() ); } if( stacked_here[true_it].size() > 1 ) { item_name = string_format( "%d %s", stacked_here[true_it].size(), item_name ); } if( get_option<bool>( "ITEM_SYMBOLS" ) ) { item_name = string_format( "%s %s", this_item.symbol(), item_name ); } trim_and_print( w_pickup, 1 + ( cur_it % maxitems ), 6, pickupW - 4, icolor, item_name ); } } mvwprintw( w_pickup, maxitems + 1, 0, _( "[%s] Unmark" ), ctxt.get_desc( "LEFT", 1 ) ); center_print( w_pickup, maxitems + 1, c_light_gray, string_format( _( "[%s] Help" ), ctxt.get_desc( "HELP_KEYBINDINGS", 1 ) ) ); right_print( w_pickup, maxitems + 1, 0, c_light_gray, string_format( _( "[%s] Mark" ), ctxt.get_desc( "RIGHT", 1 ) ) ); mvwprintw( w_pickup, maxitems + 2, 0, _( "[%s] Prev" ), ctxt.get_desc( "PREV_TAB", 1 ) ); center_print( w_pickup, maxitems + 2, c_light_gray, string_format( _( "[%s] All" ), ctxt.get_desc( "SELECT_ALL", 1 ) ) ); right_print( w_pickup, maxitems + 2, 0, c_light_gray, string_format( _( "[%s] Next" ), ctxt.get_desc( "NEXT_TAB", 1 ) ) ); if( update ) { // Update weight & volume information update = false; for( int i = 9; i < pickupW; ++i ) { mvwaddch( w_pickup, 0, i, ' ' ); } units::mass weight_picked_up = 0_gram; units::volume volume_picked_up = 0_ml; for( size_t i = 0; i < getitem.size(); i++ ) { if( getitem[i].pick ) { item temp = stacked_here[i].begin()->_item; if( temp.count_by_charges() && getitem[i].count < temp.charges && getitem[i].count != 0 ) { temp.charges = getitem[i].count; } int num_picked = std::min( stacked_here[i].size(), getitem[i].count == 0 ? 
stacked_here[i].size() : getitem[i].count ); weight_picked_up += temp.weight() * num_picked; volume_picked_up += temp.volume() * num_picked; } } auto weight_predict = g->u.weight_carried() + weight_picked_up; auto volume_predict = g->u.volume_carried() + volume_picked_up; mvwprintz( w_pickup, 0, 5, weight_predict > g->u.weight_capacity() ? c_red : c_white, _( "Wgt %.1f" ), round_up( convert_weight( weight_predict ), 1 ) ); wprintz( w_pickup, c_white, "/%.1f", round_up( convert_weight( g->u.weight_capacity() ), 1 ) ); std::string fmted_volume_predict = format_volume( volume_predict ); mvwprintz( w_pickup, 0, 18, volume_predict > g->u.volume_capacity() ? c_red : c_white, _( "Vol %s" ), fmted_volume_predict ); std::string fmted_volume_capacity = format_volume( g->u.volume_capacity() ); wprintz( w_pickup, c_white, "/%s", fmted_volume_capacity ); } wrefresh( w_pickup ); action = ctxt.handle_input(); raw_input_char = ctxt.get_raw_input().get_first_input(); } while( action != "QUIT" && action != "CONFIRM" ); bool item_selected = false; // Check if we have selected an item. for( auto selection : getitem ) { if( selection.pick ) { item_selected = true; } } if( action != "CONFIRM" || !item_selected ) { w_pickup = catacurses::window(); w_item_info = catacurses::window(); add_msg( _( "Never mind." ) ); g->reenter_fullscreen(); g->refresh_all(); return; } } // At this point we've selected our items, register an activity to pick them up. g->u.assign_activity( activity_id( "ACT_PICKUP" ) ); g->u.activity.placement = p - g->u.pos(); g->u.activity.values.push_back( from_vehicle ); if( min == -1 ) { // Auto pickup will need to auto resume since there can be several of them on the stack. 
g->u.activity.auto_resume = true; } std::vector<std::pair<int, int>> pick_values; for( size_t i = 0; i < stacked_here.size(); i++ ) { const auto &selection = getitem[i]; if( !selection.pick ) { continue; } const auto &stack = stacked_here[i]; // Note: items can be both charged and stacked // For robustness, let's assume they can be both in the same stack bool pick_all = selection.count == 0; size_t count = selection.count; for( const item_idx &it : stack ) { if( !pick_all && count == 0 ) { break; } if( it._item.count_by_charges() ) { size_t num_picked = std::min( static_cast<size_t>( it._item.charges ), count ); pick_values.push_back( { static_cast<int>( it.idx ), static_cast<int>( num_picked ) } ); count -= num_picked; } else { size_t num_picked = 1; pick_values.push_back( { static_cast<int>( it.idx ), 0 } ); count -= num_picked; } } } // The pickup activity picks up items last-to-first from its values list, so make sure the // higher indices are at the end. std::sort( pick_values.begin(), pick_values.end() ); for( auto &it : pick_values ) { g->u.activity.values.push_back( it.first ); g->u.activity.values.push_back( it.second ); } g->reenter_fullscreen(); }
/*
 * Check and strip the PE wrapper from around the signature and check that the
 * remnant looks something like PKCS#7.
 *
 * @pebuf: start of the whole PE image in memory
 * @ctx:   parse context; on entry sig_offset/sig_len cover the certificate
 *         table, on success they are narrowed to the bare PKCS#7 blob
 *
 * Returns 0 on success, or a negative errno:
 *   -ELIBBAD       wrapper malformed / payload not PKCS#7
 *   -ENOTSUPP      unsupported revision or certificate type
 *   -EKEYREJECTED  wrapper present but signature data missing
 *   -EMSGSIZE      ASN.1 length form too large to handle
 */
static int pefile_strip_sig_wrapper(const void *pebuf,
				    struct pefile_context *ctx)
{
	struct win_certificate wrapper;
	const u8 *pkcs7;
	unsigned len;

	if (ctx->sig_len < sizeof(wrapper)) {
		pr_debug("Signature wrapper too short\n");
		return -ELIBBAD;
	}

	/* copy out the header; the buffer may be unaligned */
	memcpy(&wrapper, pebuf + ctx->sig_offset, sizeof(wrapper));
	pr_debug("sig wrapper = { %x, %x, %x }\n",
		 wrapper.length, wrapper.revision, wrapper.cert_type);

	/* Both pesign and sbsign round up the length of certificate table
	 * (in optional header data directories) to 8 byte alignment.
	 */
	if (round_up(wrapper.length, 8) != ctx->sig_len) {
		pr_debug("Signature wrapper len wrong\n");
		return -ELIBBAD;
	}
	if (wrapper.revision != WIN_CERT_REVISION_2_0) {
		pr_debug("Signature is not revision 2.0\n");
		return -ENOTSUPP;
	}
	if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
		pr_debug("Signature certificate type is not PKCS\n");
		return -ENOTSUPP;
	}

	/* It looks like the pkcs signature length in wrapper->length and the
	 * size obtained from the data dir entries, which lists the total size
	 * of certificate table, are both aligned to an octaword boundary, so
	 * we may have to deal with some padding.
	 */
	ctx->sig_len = wrapper.length;
	ctx->sig_offset += sizeof(wrapper);
	ctx->sig_len -= sizeof(wrapper);
	if (ctx->sig_len < 4) {
		pr_debug("Signature data missing\n");
		return -EKEYREJECTED;
	}

	/* What's left should be a PKCS#7 cert */
	pkcs7 = pebuf + ctx->sig_offset;
	if (pkcs7[0] != (ASN1_CONS_BIT | ASN1_SEQ))
		goto not_pkcs7;

	/* Decode the outer SEQUENCE's DER length octets to find the real
	 * signature size inside any trailing padding.
	 */
	switch (pkcs7[1]) {
	case 0 ... 0x7f:
		/* short form: length in one byte */
		len = pkcs7[1] + 2;
		goto check_len;
	case ASN1_INDEFINITE_LENGTH:
		/* indefinite length: take the whole remaining blob */
		return 0;
	case 0x81:
		/* long form, 1 length octet */
		len = pkcs7[2] + 3;
		goto check_len;
	case 0x82:
		/* long form, 2 length octets */
		len = ((pkcs7[2] << 8) | pkcs7[3]) + 4;
		goto check_len;
	case 0x83 ... 0xff:
		/* 3+ length octets: larger than we're willing to parse */
		return -EMSGSIZE;
	default:
		goto not_pkcs7;
	}

check_len:
	if (len <= ctx->sig_len) {
		/* There may be padding */
		ctx->sig_len = len;
		return 0;
	}
not_pkcs7:
	pr_debug("Signature data not PKCS#7\n");
	return -ELIBBAD;
}
/*
 * elf_object_map - map all PT_LOAD-style segments of an ELF module into a
 * target process.
 *
 * Reserves one contiguous PROT_NONE region covering [min_vaddr, max_vaddr),
 * then for each loadable segment maps anonymous read/write/exec pages over it
 * (MAP_FIXED) and copies the file contents in with elf_read().  Data is read
 * in rather than file-mapped because the host page granularity differs from
 * Linux's and pages may need relocation fix-ups afterwards.
 *
 * Side effects: updates m->base, m->entry_point and advances proc->brk past
 * the highest mapped address.
 *
 * Returns 0 on success, -1 on any failure (mappings made so far are not
 * unwound here — presumably the caller tears the process down; verify).
 */
int elf_object_map(struct process *proc, struct elf_module *m)
{
	int i;
	int r;

	dprintf("to load (%d)\n", m->num_to_load);
	dprintf("%-8s %-8s %-8s %-8s\n", "vaddr", "memsz", "offset", "filesz");
	/* first pass: compute the page-aligned extent of the whole image */
	for (i = 0; i < m->num_to_load; i++) {
		m->min_vaddr = MIN(round_down_to_page(m->to_load[i].p_vaddr), m->min_vaddr);
		m->max_vaddr = MAX(round_up_to_page(m->to_load[i].p_vaddr + m->to_load[i].p_memsz), m->max_vaddr);
		dprintf("%08x %08x %08x %08x\n",
			m->to_load[i].p_vaddr, m->to_load[i].p_memsz,
			m->to_load[i].p_offset, m->to_load[i].p_filesz);
	}

	dprintf("vaddr -> %08x-%08x\n", m->min_vaddr, m->max_vaddr);

	/* reserve memory for image */
	m->base = vm_process_map(proc, m->min_vaddr, m->max_vaddr - m->min_vaddr,
				 _l_PROT_NONE, _l_MAP_ANONYMOUS|_l_MAP_PRIVATE, NULL, 0);
	if (m->base == _l_MAP_FAILED) {
		dprintf("mmap failed\n");
		goto error;
	}
	dprintf("base = %08x\n", m->base);

	/* second pass: map and populate each segment */
	for (i = 0; i < m->num_to_load; i++) {
		int mapflags = elf_mmap_flags_get(m->to_load[i].p_flags);
		user_ptr_t p;
		unsigned int vaddr = round_down_to_page(m->to_load[i].p_vaddr);
		unsigned int vaddr_offset = (m->to_load[i].p_vaddr & pagemask);
		unsigned int memsz = round_up_to_page(vaddr_offset + m->to_load[i].p_memsz);
		unsigned int max_addr;
		void *ptr;
		size_t max_sz = 0;

		elf_map_flags_print(mapflags);

		p = m->base - m->min_vaddr + vaddr;
		dprintf("map at %08x, offset %08x sz %08x\n", p, vaddr, memsz);
		/*
		 * Map anonymous memory then read the data in
		 * rather than mapping the file directly.
		 *
		 * The windows page granularity is different to that on Linux.
		 * The pages may need to be modified to apply relocations.
		 *
		 * nb. need MAP_FIXED to blow away our old mapping
		 */
		p = vm_process_map(proc, p, memsz,
				   _l_PROT_READ | _l_PROT_WRITE | _l_PROT_EXEC,
				   _l_MAP_FIXED|_l_MAP_PRIVATE|_l_MAP_ANONYMOUS, NULL, 0);
		if (p == _l_MAP_FAILED) {
			fprintf(stderr, "mmap failed (%d)\n", -(int)p);
			goto error;
		}

		/* copy the segment's file-backed portion into place */
		p = m->base - m->min_vaddr + m->to_load[i].p_vaddr;
		dprintf("pread %08x bytes from %08x to %08x\n",
			m->to_load[i].p_filesz, m->to_load[i].p_offset, p);

		/* translate the target address to a host pointer we can write */
		r = vm_get_pointer(proc, p, &ptr, &max_sz);
		if (r < 0)
			goto error;
		if (max_sz < m->to_load[i].p_filesz) {
			r = -_L(EPERM);
			goto error;
		}

		r = elf_read(m->fp, ptr, m->to_load[i].p_filesz, m->to_load[i].p_offset);
		if (r != m->to_load[i].p_filesz) {
			fprintf(stderr, "read failed (%08x != %08x)\n",
				m->to_load[i].p_filesz, r);
			goto error;
		}

		/* remember highest address we mapped, use it for brk */
		max_addr = m->to_load[i].p_vaddr + m->to_load[i].p_memsz;
		max_addr = round_up(max_addr, pagesize);
		if (proc->brk < max_addr)
			proc->brk = max_addr;
		dprintf("brk at %08x\n", proc->brk);
	}

	m->entry_point = m->base - m->min_vaddr + m->ehdr.e_entry;

	return 0;
error:
	return -1;
}
/*
 * _forecastAndProcess - determine how many output items the wrapped GNU Radio
 * block can produce from the data currently queued on the input streams, then
 * invoke its general_work() once.
 *
 * @eos:      set to true if the block signals a stop condition (< WORK_DONE)
 * @istreams: input stream buffers feeding the block
 *
 * Returns the number of items produced by general_work(), -1 when not enough
 * data is available yet, or -2 if an expected input stream is missing.
 */
template < typename IN_PORT_TYPE > int probe_signal_i_base::_forecastAndProcess( bool &eos,
        typename std::vector< gr_istream< IN_PORT_TYPE > > &istreams )
{
    typedef typename std::vector< gr_istream< IN_PORT_TYPE > > _IStreamList;
    typename _IStreamList::iterator istream = istreams.begin();
    int nout = 0;
    bool dataReady = false;
    if ( !eos ) {
        // find the largest backlog across all input streams
        uint64_t max_items_avail = 0;
        for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
            LOG_TRACE( probe_signal_i_base, "GET MAX ITEMS: STREAM:" << idx << " NITEMS/SCALARS:" <<
                       istream->nitems() << "/" << istream->_data.size() );
            max_items_avail = std::max( istream->nitems(), max_items_avail );
        }

        //
        // calc number of output items to produce
        //
        noutput_items = (int) (max_items_avail * gr_sptr->relative_rate ());
        noutput_items = round_down (noutput_items, gr_sptr->output_multiple ());

        if ( noutput_items <= 0 ) {
            LOG_TRACE( probe_signal_i_base, "DATA CHECK - MAX ITEMS NOUTPUT/MAX_ITEMS:" <<
                       noutput_items << "/" << max_items_avail);
            return -1;
        }

        // fixed-rate blocks: clamp to the smallest per-stream output estimate
        if ( gr_sptr->fixed_rate() ) {
            istream = istreams.begin();
            for ( int i=0; istream != istreams.end(); i++, istream++ ) {
                int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
                if ( gr_sptr->output_multiple_set() ) {
                    t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
                }
                if ( t_noutput_items > 0 ) {
                    if ( noutput_items == 0 ) {
                        noutput_items = t_noutput_items;
                    }
                    if ( t_noutput_items <= noutput_items ) {
                        noutput_items = t_noutput_items;
                    }
                }
            }
            LOG_TRACE( probe_signal_i_base, " FIXED FORECAST NOUTPUT/output_multiple == " <<
                       noutput_items << "/" << gr_sptr->output_multiple());
        }

        //
        // ask the block how much input they need to produce noutput_items...
        // if enough data is available to process then set the dataReady flag
        //
        int32_t outMultiple = gr_sptr->output_multiple();
        while ( !dataReady && noutput_items >= outMultiple ) {
            //
            // ask the block how much input they need to produce noutput_items...
            //
            gr_sptr->forecast(noutput_items, _ninput_items_required);
            LOG_TRACE( probe_signal_i_base, "--> FORECAST IN/OUT " << _ninput_items_required[0] << "/" << noutput_items );

            istream = istreams.begin();
            uint32_t dr_cnt=0;
            for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
                // check if buffer has enough elements
                _input_ready[idx] = false;
                if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
                    _input_ready[idx] = true;
                    dr_cnt++;
                }
                LOG_TRACE( probe_signal_i_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" <<
                           istream->nelems() << "/" << istream->nitems() << "/" << _ninput_items_required[idx] << "/" << _input_ready[idx]);
            }

            // not every stream has enough data: shrink the request and retry
            if ( dr_cnt < istreams.size() ) {
                if ( outMultiple > 1 ) {
                    noutput_items -= outMultiple;
                } else {
                    noutput_items /= 2;
                }
            } else {
                dataReady = true;
            }
            LOG_TRACE( probe_signal_i_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
        }

        // check if data is ready...
        if ( !dataReady ) {
            LOG_TRACE( probe_signal_i_base, "DATA CHECK - NOT ENOUGH DATA  AVAIL/REQ:" <<
                       _istreams[0].nitems() << "/" << _ninput_items_required[0] );
            return -1;
        }

        // reset looping variables
        int ritems = 0;
        int nitems = 0;

        // reset caching vectors
        _output_items.clear();
        _input_items.clear();
        _ninput_items.clear();

        // gather read pointers / counts for every ready stream
        istream = istreams.begin();
        for ( int idx=0 ; istream != istreams.end(); idx++, istream++ ) {
            // check if the stream is ready
            if ( !_input_ready[idx] ) continue;

            // get number of items remaining
            try {
                ritems = gr_sptr->nitems_read( idx );
            } catch(...){
                // something bad has happened, we are missing an input stream
                LOG_ERROR( probe_signal_i_base, "MISSING INPUT STREAM FOR GR BLOCK, STREAM ID:" << istream->streamID );
                return -2;
            }

            nitems = istream->nitems() - ritems;
            LOG_TRACE( probe_signal_i_base, " ISTREAM: IDX:" << idx << " ITEMS AVAIL/READ/REQ " <<
                       nitems << "/" << ritems << "/" << _ninput_items_required[idx] );
            if ( nitems >= _ninput_items_required[idx] && nitems > 0 ) {
                //remove eos checks ...if ( nitems < _ninput_items_required[idx] ) nitems=0;
                _ninput_items.push_back( nitems );
                _input_items.push_back( (const void *) (istream->read_pointer(ritems)) );
            }
        }

        nout=0;
        if ( _input_items.size() != 0 && serviceThread->threadRunning() ) {
            LOG_TRACE( probe_signal_i_base, " CALLING WORK.....N_OUT:" << noutput_items <<
                       " N_IN:" << nitems << " ISTREAMS:" << _input_items.size() <<
                       " OSTREAMS:" << _output_items.size());
            nout = gr_sptr->general_work( noutput_items, _ninput_items, _input_items,
                                          _output_items);
            // sink/analyzer patterns do not return items, so consume_each is not called in Gnu Radio BLOCK
            if ( nout == 0 ) {
                gr_sptr->consume_each(nitems);
            }
            LOG_TRACE( probe_signal_i_base, "RETURN  WORK ..... N_OUT:" << nout);
        }

        // check for stop condition from work method
        if ( nout < gr_block::WORK_DONE ) {
            LOG_WARN( probe_signal_i_base, "WORK RETURNED STOP CONDITION..." << nout );
            nout=0;
            eos = true;
        }
    }

    return nout;
}
static void mxr_graph_fix_geometry(struct mxr_layer *layer, enum mxr_geometry_stage stage, unsigned long flags) { struct mxr_geometry *geo = &layer->geo; struct mxr_crop *src = &geo->src; struct mxr_crop *dst = &geo->dst; unsigned int x_center, y_center; switch (stage) { case MXR_GEOMETRY_SINK: /* nothing to be fixed here */ flags = 0; /* fall through */ case MXR_GEOMETRY_COMPOSE: /* remember center of the area */ x_center = dst->x_offset + dst->width / 2; y_center = dst->y_offset + dst->height / 2; /* round up/down to 2 multiple depending on flags */ if (flags & V4L2_SEL_FLAG_LE) { dst->width = round_down(dst->width, 2); dst->height = round_down(dst->height, 2); } else { dst->width = round_up(dst->width, 2); dst->height = round_up(dst->height, 2); } /* assure that compose rect is inside display area */ dst->width = min(dst->width, dst->full_width); dst->height = min(dst->height, dst->full_height); /* ensure that compose is reachable using 2x scaling */ dst->width = min(dst->width, 2 * src->full_width); dst->height = min(dst->height, 2 * src->full_height); /* setup offsets */ dst->x_offset = do_center(x_center, dst->width, dst->full_width, flags); dst->y_offset = do_center(y_center, dst->height, dst->full_height, flags); flags = 0; /* fall through */ case MXR_GEOMETRY_CROP: /* remember center of the area */ x_center = src->x_offset + src->width / 2; y_center = src->y_offset + src->height / 2; /* ensure that cropping area lies inside the buffer */ if (src->full_width < dst->width) src->width = dst->width / 2; else src->width = closest(src->width, dst->width / 2, dst->width, flags); if (src->width == dst->width) geo->x_ratio = 0; else geo->x_ratio = 1; if (src->full_height < dst->height) src->height = dst->height / 2; else src->height = closest(src->height, dst->height / 2, dst->height, flags); if (src->height == dst->height) geo->y_ratio = 0; else geo->y_ratio = 1; /* setup offsets */ src->x_offset = do_center(x_center, src->width, src->full_width, flags); 
src->y_offset = do_center(y_center, src->height, src->full_height, flags); flags = 0; /* fall through */ case MXR_GEOMETRY_SOURCE: src->full_width = clamp_val(src->full_width, src->width + src->x_offset, 32767); src->full_height = clamp_val(src->full_height, src->height + src->y_offset, 2047); }; }
/*
 * qproc_load_segments() - load all loadable ELF segments of the modem
 * firmware into place.
 *
 * Two passes over the program headers:
 *  1. via the mdt header view: find the lowest segment address (min_addr),
 *     note whether that segment is relocatable (and its alignment), and
 *     track the 4K-rounded end of the image (max_addr).
 *  2. via the raw ELF view: for each loadable segment, compute the physical
 *     load address (relocated against qproc->reloc_phys when the image is
 *     relocatable) and hand it to qproc_load_segment() using the split
 *     firmware naming scheme "modem.bNN".
 *
 * NOTE(review): this function is in a debug/transitional state — the ioremap
 * copy path is compiled out with #if 0, several locals (da, ptr, seg_fw,
 * align, max_addr) are only used by commented-out code, and pr_emerg() is
 * used for what look like debug prints.  Documented as-is.
 *
 * Returns 0 on success or a negative errno from the first failing segment.
 */
static int qproc_load_segments(struct qproc *qproc, const struct firmware *fw)
{
	struct device *dev = qproc->dev;
	struct elf32_hdr *ehdr;
	struct elf32_phdr *phdr;
	int i, ret = 0;
	const u8 *elf_data = fw->data;
	const struct firmware *seg_fw;
	char fw_name[20];
	const struct mdt_hdr *mdt;
	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
	phys_addr_t max_addr = 0;
	size_t align = 0;
	bool relocatable = false;
	phys_addr_t paddr;

	ehdr = (struct elf32_hdr *)elf_data;
	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);

	/* re-point at the mdt wrapper's view of the same headers */
	mdt = (struct mdt_hdr *)fw->data;
	ehdr = &mdt->hdr;

	/* pass 1: find image extent and relocation properties */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &mdt->phdr[i];

		if (!segment_is_loadable(phdr))
			continue;

		if (phdr->p_paddr < min_addr) {
			min_addr = phdr->p_paddr;

			if (segment_is_relocatable(phdr)) {
				align = phdr->p_align;
				relocatable = true;
			}
		}

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = round_up(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	ehdr = (struct elf32_hdr *)elf_data;
	phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		u32 da = phdr->p_paddr;
		u32 paddr = phdr->p_paddr;
		u32 memsz = phdr->p_memsz;
		u32 filesz = phdr->p_filesz;
		void *ptr;

		if (!segment_is_loadable(phdr))
			continue;
/*
		if (phdr->p_type != PT_LOAD)
			continue;

		if (segment_is_hash(phdr->p_flags))
			continue;

		if (filesz == 0)
			continue;
*/
		//dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
		pr_emerg("phdr(%d): type %d paddr 0x%x memsz 0x%x filesz 0x%x\n",
			i, phdr->p_type, paddr, memsz, filesz);

		if (filesz > memsz) {
			dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
				filesz, memsz);
			ret = -EINVAL;
			break;
		}

		/* relocate against the configured physical base if needed */
		paddr = relocatable ? (phdr->p_paddr - min_addr + qproc->reloc_phys) : phdr->p_paddr;

		pr_emerg("Relocated-phdr(%d): type %d paddr 0x%x memsz 0x%x filesz 0x%x\n",
			i, phdr->p_type, paddr, memsz, filesz);

//		if (filesz) {
			snprintf(fw_name, sizeof(fw_name), "modem.b%02d", i);
			ret = qproc_load_segment(qproc, fw_name, phdr, paddr);
//		}

#if 0
		ptr = ioremap(da, memsz);
		if (!ptr) {
			dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
			ret = -ENOMEM;
			break;
		}

		if (filesz) {
			snprintf(fw_name, sizeof(fw_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
			if (ret) {
				iounmap(ptr);
				break;
			}

			memcpy(ptr, seg_fw->data, filesz);

			release_firmware(seg_fw);
		}

		if (memsz > filesz)
			memset(ptr + filesz, 0, memsz - filesz);
		wmb();
		iounmap(ptr);
#endif
	}

	return ret;
}
/* for non-agg data frame or management frame */
/*
 * rtw_dump_xframe() - write one xmit frame (all its fragments) out over the
 * USB bulk pipe.
 *
 * For eligible data frames (not ARP/EAPOL/WAPI/DHCP) an ADDBA request command
 * is issued first.  Each fragment gets a tx descriptor prepended via
 * update_txdesc(); if the descriptor was "pulled" into the packet headroom,
 * the buffer pointer is advanced by PACKET_OFFSET_SZ.  The frame is always
 * freed before returning.
 *
 * Returns _SUCCESS, or _FAIL if any fragment's USB write failed (failure is
 * also reported through the xmit buffer's sctx).
 */
static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 ret = _SUCCESS;
	s32 inner_ret = _SUCCESS;
	int t, sz, w_sz, pull = 0;
	u8 *mem_addr;
	u32 ff_hwaddr;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct security_priv *psecuritypriv = &adapt->securitypriv;

	/* skip ADDBA for ARP (0x0806), EAPOL (0x888e), WAPI (0x88b4) and DHCP */
	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
	    (pxmitframe->attrib.ether_type != 0x0806) &&
	    (pxmitframe->attrib.ether_type != 0x888e) &&
	    (pxmitframe->attrib.ether_type != 0x88b4) &&
	    (pxmitframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pxmitframe);

	mem_addr = pxmitframe->buf_addr;

	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));

	for (t = 0; t < pattrib->nr_frags; t++) {
		/* latch a failure from the previous fragment's write */
		if (inner_ret != _SUCCESS && ret == _SUCCESS)
			ret = _FAIL;

		if (t != (pattrib->nr_frags - 1)) {
			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
				 ("pattrib->nr_frags=%d\n", pattrib->nr_frags));

			/* intermediate fragment: fixed fragment length minus
			 * FCS (4) and, for HW encryption, the ICV */
			sz = pxmitpriv->frag_len;
			sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
		} else {
			/* no frag */
			sz = pattrib->last_txcmdsz;
		}

		pull = update_txdesc(pxmitframe, mem_addr, sz, false);

		if (pull) {
			mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
			pxmitframe->buf_addr = mem_addr;
			w_sz = sz + TXDESC_SIZE;
		} else {
			w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
		}

		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);

		inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);

		rtw_count_tx_stats(adapt, pxmitframe, sz);

		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
			 ("rtw_write_port, w_sz=%d\n", w_sz));

		mem_addr += w_sz;

		/* next fragment starts on a 4-byte boundary */
		mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
	}

	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	if (ret != _SUCCESS)
		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);

	return ret;
}
/* Pointer-typed convenience wrapper: rounds the address held in `a` up to
 * the next multiple of `amt` by delegating to the integer round_up
 * overload, then converts the result back to a generic pointer. */
void *round_up(void *a, size_t amt)
{
    uintptr_t addr = (uintptr_t)a;

    return (void *)round_up(addr, amt);
}
/*
 * Allocate and configure an ffmpeg MJPEG/AVI encoder context used to
 * build a proxy movie for @anim at @proxy_size.
 *
 * width/height are rounded up to multiples of 8 (JPEG requirement); if
 * the source stream's geometry or pixel format differs from the proxy's,
 * a scaling frame and sws context are set up as well.
 *
 * Returns the new context, or NULL on failure.  On failure all partially
 * acquired resources (including the context itself) are released.
 */
static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
        struct anim *anim, AVStream *st, int proxy_size, int width, int height,
        int quality)
{
	struct proxy_output_ctx *rv = MEM_callocN(
	        sizeof(struct proxy_output_ctx), "alloc_proxy_output");
	char fname[FILE_MAX];
	int ffmpeg_quality;

	/* JPEG requires this */
	width = round_up(width, 8);
	height = round_up(height, 8);

	rv->proxy_size = proxy_size;
	rv->anim = anim;
	get_proxy_filename(rv->anim, rv->proxy_size, fname, true);
	BLI_make_existing_file(fname);

	rv->of = avformat_alloc_context();
	rv->of->oformat = av_guess_format("avi", NULL, NULL);

	BLI_strncpy(rv->of->filename, fname, sizeof(rv->of->filename));

	fprintf(stderr, "Starting work on proxy: %s\n", rv->of->filename);

	rv->st = avformat_new_stream(rv->of, NULL);
	rv->st->id = 0;

	rv->c = rv->st->codec;
	rv->c->codec_type = AVMEDIA_TYPE_VIDEO;
	rv->c->codec_id = AV_CODEC_ID_MJPEG;
	rv->c->width = width;
	rv->c->height = height;

	rv->of->oformat->video_codec = rv->c->codec_id;
	rv->codec = avcodec_find_encoder(rv->c->codec_id);

	if (!rv->codec) {
		fprintf(stderr, "No ffmpeg MJPEG encoder available? "
		        "Proxy not built!\n");
		av_free(rv->of);
		MEM_freeN(rv); /* was leaked on this path */
		return NULL;
	}

	if (rv->codec->pix_fmts) {
		rv->c->pix_fmt = rv->codec->pix_fmts[0];
	}
	else {
		rv->c->pix_fmt = AV_PIX_FMT_YUVJ420P;
	}

	rv->c->sample_aspect_ratio =
	        rv->st->sample_aspect_ratio =
	        st->codec->sample_aspect_ratio;

	rv->c->time_base.den = 25;
	rv->c->time_base.num = 1;
	rv->st->time_base = rv->c->time_base;

	/* there's no way to set JPEG quality in the same way as in AVI JPEG and image sequence,
	 * but this seems to be giving expected quality result */
	ffmpeg_quality = (int)(1.0f + 30.0f * (1.0f - (float)quality / 100.0f) + 0.5f);
	av_opt_set_int(rv->c, "qmin", ffmpeg_quality, 0);
	av_opt_set_int(rv->c, "qmax", ffmpeg_quality, 0);

	if (rv->of->flags & AVFMT_GLOBALHEADER) {
		rv->c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	if (avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE) < 0) {
		fprintf(stderr, "Couldn't open outputfile! "
		        "Proxy not built!\n");
		av_free(rv->of);
		MEM_freeN(rv); /* was leaked on this path; also return NULL, not 0 */
		return NULL;
	}

	/* NOTE(review): avcodec_open2() result is unchecked -- a failure here
	 * would surface later during encoding; confirm whether that is OK. */
	avcodec_open2(rv->c, rv->codec, NULL);

	rv->orig_height = av_get_cropped_height_from_codec(st->codec);

	if (st->codec->width != width || st->codec->height != height ||
	    st->codec->pix_fmt != rv->c->pix_fmt)
	{
		/* Geometry/format mismatch: allocate an intermediate frame
		 * (width rounded to 16 for the picture buffer) and a scaler. */
		rv->frame = av_frame_alloc();
		avpicture_fill((AVPicture *)rv->frame,
		               MEM_mallocN(avpicture_get_size(rv->c->pix_fmt,
		                                              round_up(width, 16), height),
		                           "alloc proxy output frame"),
		               rv->c->pix_fmt, round_up(width, 16), height);

		rv->sws_ctx = sws_getContext(st->codec->width, rv->orig_height,
		                             st->codec->pix_fmt,
		                             width, height, rv->c->pix_fmt,
		                             SWS_FAST_BILINEAR | SWS_PRINT_INFO,
		                             NULL, NULL, NULL);
	}

	if (avformat_write_header(rv->of, NULL) < 0) {
		fprintf(stderr, "Couldn't set output parameters? "
		        "Proxy not built!\n");
		av_free(rv->of);
		MEM_freeN(rv); /* was leaked on this path; also return NULL, not 0 */
		return NULL;
	}

	return rv;
}
#include "cpu.h" #include "errno.h" #include "protobuf.h" #include "protobuf/core.pb-c.h" #include "protobuf/creds.pb-c.h" /* * Injected syscall instruction */ const char code_syscall[] = { 0x0f, 0x05, /* syscall */ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc /* int 3, ... */ }; const int code_syscall_size = round_up(sizeof(code_syscall), sizeof(long)); static inline __always_unused void __check_code_syscall(void) { BUILD_BUG_ON(sizeof(code_syscall) != BUILTIN_SYSCALL_SIZE); BUILD_BUG_ON(!is_log2(sizeof(code_syscall))); } void parasite_setup_regs(unsigned long new_ip, void *stack, user_regs_struct_t *regs) { regs->ip = new_ip; if (stack) regs->sp = (unsigned long) stack; /* Avoid end of syscall processing */ regs->orig_ax = -1;
/** Entry point of the test kernel.
 *
 * Verifies the KBoot magic value, initializes the memory manager and
 * consoles from the tag list, then walks the tag list and dumps every
 * known tag type.  Never returns: spins in arch_pause() when done (or
 * on a bad magic number).
 *
 * @param magic KBoot magic number.
 * @param tags  Tag list pointer. */
void kmain(uint32_t magic, kboot_tag_t *tags)
{
	debug_console_init();

	if (magic != KBOOT_MAGIC) {
		printf("Incorrect magic number 0x%x\n", magic);
		while (true)
			arch_pause();
	}

	mm_init(tags);
	primary_console_init(tags);

	printf("Test kernel loaded: magic: 0x%x, tags: %p\n", magic, tags);

	/* Walk the tag list; KBOOT_TAG_NONE terminates it. */
	while (tags->type != KBOOT_TAG_NONE) {
		switch (tags->type) {
		case KBOOT_TAG_CORE:
			dump_core_tag((kboot_tag_core_t *)tags);
			break;
		case KBOOT_TAG_OPTION:
			dump_option_tag((kboot_tag_option_t *)tags);
			break;
		case KBOOT_TAG_MEMORY:
			dump_memory_tag((kboot_tag_memory_t *)tags);
			break;
		case KBOOT_TAG_VMEM:
			dump_vmem_tag((kboot_tag_vmem_t *)tags);
			break;
		case KBOOT_TAG_PAGETABLES:
			dump_pagetables_tag((kboot_tag_pagetables_t *)tags);
			break;
		case KBOOT_TAG_MODULE:
			dump_module_tag((kboot_tag_module_t *)tags);
			break;
		case KBOOT_TAG_VIDEO:
			dump_video_tag((kboot_tag_video_t *)tags);
			break;
		case KBOOT_TAG_BOOTDEV:
			dump_bootdev_tag((kboot_tag_bootdev_t *)tags);
			break;
		case KBOOT_TAG_LOG:
			dump_log_tag((kboot_tag_log_t *)tags);
			break;
		case KBOOT_TAG_SECTIONS:
			dump_sections_tag((kboot_tag_sections_t *)tags);
			break;
		case KBOOT_TAG_BIOS_E820:
			dump_bios_e820_tag((kboot_tag_bios_e820_t *)tags);
			break;
		case KBOOT_TAG_EFI:
			dump_efi_tag((kboot_tag_efi_t *)tags);
			break;
		}

		/* Tags are 8-byte aligned; advance past this tag's payload. */
		tags = (kboot_tag_t *)round_up((ptr_t)tags + tags->size, 8);
	}

	printf("Tag list dump complete\n");

#if defined(__i386__) || defined(__x86_64__)
	/* Flush caches so the dump output is visible to external observers. */
	__asm__ volatile("wbinvd");
#endif

	while (true)
		arch_pause();
}
/*
 * Compute x^z1 * y^z2 mod p using a fixed-window (2-bit) simultaneous
 * multi-exponentiation in Montgomery form.
 *
 * A 16-entry table M holds one * x^a * y^b for all 2-bit pairs (a, b);
 * the accumulator is squared twice per iteration and multiplied by the
 * table entry selected by the current 2-bit slices of z1 and z2.
 *
 * Throws Invalid_Argument if either exponent is negative.
 */
BigInt monty_multi_exp(std::shared_ptr<const Montgomery_Params> params_p,
                       const BigInt& x_bn,
                       const BigInt& z1,
                       const BigInt& y_bn,
                       const BigInt& z2)
   {
   if(z1.is_negative() || z2.is_negative())
      throw Invalid_Argument("multi_exponentiate exponents must be positive");

   // Round the scan length up to a multiple of the 2-bit window size.
   const size_t z_bits = round_up(std::max(z1.bits(), z2.bits()), 2);

   secure_vector<word> ws;

   const Montgomery_Int one(params_p, params_p->R1(), false);
   //const Montgomery_Int one(params_p, 1);

   // Precompute x^1..x^3 and y^1..y^3 and all cross products y^b * x^a.
   const Montgomery_Int x1(params_p, x_bn);
   const Montgomery_Int x2 = x1.square(ws);
   const Montgomery_Int x3 = x2.mul(x1, ws);

   const Montgomery_Int y1(params_p, y_bn);
   const Montgomery_Int y2 = y1.square(ws);
   const Montgomery_Int y3 = y2.mul(y1, ws);

   const Montgomery_Int y1x1 = y1.mul(x1, ws);
   const Montgomery_Int y1x2 = y1.mul(x2, ws);
   const Montgomery_Int y1x3 = y1.mul(x3, ws);

   const Montgomery_Int y2x1 = y2.mul(x1, ws);
   const Montgomery_Int y2x2 = y2.mul(x2, ws);
   const Montgomery_Int y2x3 = y2.mul(x3, ws);

   const Montgomery_Int y3x1 = y3.mul(x1, ws);
   const Montgomery_Int y3x2 = y3.mul(x2, ws);
   const Montgomery_Int y3x3 = y3.mul(x3, ws);

   // Table index is (z2 bits << 2) | z1 bits, see z12 below.
   const Montgomery_Int* M[16] = {
      &one,
      &x1, // 0001
      &x2, // 0010
      &x3, // 0011
      &y1, // 0100
      &y1x1,
      &y1x2,
      &y1x3,
      &y2, // 1000
      &y2x1,
      &y2x2,
      &y2x3,
      &y3, // 1100
      &y3x1,
      &y3x2,
      &y3x3
      };

   Montgomery_Int H = one;

   // Scan the exponents from the most significant 2-bit window down.
   for(size_t i = 0; i != z_bits; i += 2)
      {
      if(i > 0)
         {
         H.square_this(ws);
         H.square_this(ws);
         }

      const uint8_t z1_b = z1.get_substring(z_bits - i - 2, 2);
      const uint8_t z2_b = z2.get_substring(z_bits - i - 2, 2);

      const uint8_t z12 = (4*z2_b) + z1_b;

      H.mul_by(*M[z12], ws);
      }

   return H.value();
   }
/*
 * Poison the shadow memory covering @object with the
 * KASAN_KMALLOC_REDZONE pattern.  The poisoned length is the cache's
 * object_size rounded up to the shadow granularity.
 */
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	size_t poison_size = round_up(cache->object_size,
				      KASAN_SHADOW_SCALE_SIZE);

	kasan_poison_shadow(object, poison_size, KASAN_KMALLOC_REDZONE);
}
/*
 * JIT-compile an eBPF program for powerpc64.
 *
 * Flow: optionally blind constants, then either (a) first pass — size the
 * image with a faux-generate pass, allocate the binary header, and run two
 * real codegen passes, or (b) extra pass for subprog calls — reuse the
 * saved context/image from jit_data and only fix up subprog call targets.
 *
 * On any failure the original (un-JITed) program is returned so the
 * interpreter can still run it.
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;

	if (!fp->jit_requested)
		return org_fp;

	/* Constant blinding may clone the program; on error keep the original. */
	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;
	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	/* Per-program JIT state, persisted across passes for subprog fixups. */
	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		/* Saved state exists: this is the extra pass for subprog calls. */
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		alloclen = proglen + FUNCTION_DESCR_SIZE;
		extra_pass = true;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	if (extra_pass) {
		/*
		 * Do not touch the prologue and epilogue as they will remain
		 * unchanged. Only fix the branch target address for subprog
		 * calls in the body.
		 *
		 * This does not change the offsets and lengths of the subprog
		 * call instruction sequences and hence, the size of the JITed
		 * image as well.
		 */
		bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);

		/* There is no need to perform the usual passes. */
		goto skip_codegen_passes;
	}

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

skip_codegen_passes:
	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
	if (!fp->is_func || extra_pass) {
		/* Final pass (or error unwinding): drop the saved JIT state. */
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		/* Keep state for the upcoming extra pass. */
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}
/*
 * Allocate and register the fbdev emulation framebuffer for a Tegra DRM
 * device: create a buffer object sized for the requested surface, wrap
 * it in a DRM framebuffer, and fill in the fb_info that the fbdev layer
 * hands to userspace.
 *
 * Returns 0 on success or a negative errno; on error all resources
 * acquired so far are released via the destroy/release labels.
 */
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
			     struct drm_fb_helper_surface_size *sizes)
{
	struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
	struct tegra_drm *tegra = helper->dev->dev_private;
	struct drm_device *drm = helper->dev;
	struct drm_mode_fb_cmd2 cmd = { 0 };
	unsigned int bytes_per_pixel;
	struct drm_framebuffer *fb;
	unsigned long offset;
	struct fb_info *info;
	struct tegra_bo *bo;
	size_t size;
	int err;

	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);

	cmd.width = sizes->surface_width;
	cmd.height = sizes->surface_height;
	/* Stride must satisfy the display controller's pitch alignment. */
	cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
				  tegra->pitch_align);
	cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
						     sizes->surface_depth);

	size = cmd.pitches[0] * cmd.height;

	bo = tegra_bo_create(drm, size, 0);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	info = framebuffer_alloc(0, drm->dev);
	if (!info) {
		dev_err(drm->dev, "failed to allocate framebuffer info\n");
		tegra_bo_free_object(&bo->gem);
		return -ENOMEM;
	}

	fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
	if (IS_ERR(fbdev->fb)) {
		dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
		err = PTR_ERR(fbdev->fb);
		goto release;
	}

	fb = &fbdev->fb->base;
	helper->fb = fb;
	helper->fbdev = info;

	info->par = helper;
	info->flags = FBINFO_FLAG_DEFAULT;
	info->fbops = &tegra_fb_ops;

	err = fb_alloc_cmap(&info->cmap, 256, 0);
	if (err < 0) {
		dev_err(drm->dev, "failed to allocate color map: %d\n", err);
		goto destroy;
	}

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, helper, fb->width, fb->height);

	/* Byte offset of the visible area within the buffer object. */
	offset = info->var.xoffset * bytes_per_pixel +
		 info->var.yoffset * fb->pitches[0];

	if (bo->pages) {
		/* Non-contiguous BO: map its pages into a kernel VA range. */
		bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
				 pgprot_writecombine(PAGE_KERNEL));
		if (!bo->vaddr) {
			dev_err(drm->dev, "failed to vmap() framebuffer\n");
			err = -ENOMEM;
			goto destroy;
		}
	}

	drm->mode_config.fb_base = (resource_size_t)bo->paddr;
	info->screen_base = (void __iomem *)bo->vaddr + offset;
	info->screen_size = size;
	info->fix.smem_start = (unsigned long)(bo->paddr + offset);
	info->fix.smem_len = size;

	return 0;

destroy:
	drm_framebuffer_unregister_private(fb);
	tegra_fb_destroy(fb);
release:
	framebuffer_release(info);
	return err;
}
/*
 * qat_alg_do_precomputes() - precompute HMAC inner/outer partial hash
 * states for the QAT hardware content descriptor.
 *
 * Builds ipad/opad from @auth_key (digesting the key first when it is
 * longer than the shash block size), hashes one block of each pad, then
 * exports the intermediate states and stores them big-endian into
 * hash->sha.state1: the inner (ipad) state at offset 0 and the outer
 * (opad) state at the rounded inter-state offset.
 *
 * Returns 0 on success, -EFAULT on any shash failure, or the error code
 * from crypto_shash_digest() for an oversized key.
 */
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	struct qat_auth_state auth_state;
	/* On-stack shash descriptor; ctx[] is a VLA sized for the tfm. */
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ctx->hash_tfm)];
	} desc;
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	/* NOTE(review): assumes auth_state.data holds at least
	 * MAX_AUTH_STATE_SIZE + 64 bytes -- confirm against
	 * struct qat_auth_state's definition. */
	memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
	desc.shash.tfm = ctx->hash_tfm;
	desc.shash.flags = 0x0;

	if (auth_keylen > block_size) {
		/* RFC 2104: keys longer than one block are hashed first. */
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(&desc.shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memset(ipad + digest_size, 0, block_size - digest_size);
		memset(opad + digest_size, 0, block_size - digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memset(ipad + auth_keylen, 0, block_size - auth_keylen);
		memset(opad + auth_keylen, 0, block_size - auth_keylen);
	}

	/* XOR in the standard HMAC inner/outer pad constants. */
	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	/* Inner partial hash: H(K ^ ipad). */
	if (crypto_shash_init(&desc.shash))
		return -EFAULT;

	if (crypto_shash_update(&desc.shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(&desc.shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(&desc.shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(&desc.shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	/* Outer partial hash: H(K ^ opad), stored after the inner state. */
	if (crypto_shash_init(&desc.shash))
		return -EFAULT;

	if (crypto_shash_update(&desc.shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(&desc.shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(&desc.shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(&desc.shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	return 0;
}
/*
 * Read @count bytes from the EMAC FIFO register @reg into @data as
 * 32-bit words; a partial trailing word is rounded up to a full read.
 */
static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
{
	int nwords = round_up(count, 4) / 4;

	readsl(reg, data, nwords);
}
/*
 * qat_alg_init_dec_session() - build the decrypt-direction content
 * descriptor and firmware request template for an authenc session.
 *
 * Lays out the hardware content descriptor as [auth setup | two rounded
 * digest-sized state areas | cipher block], fills in the AES decrypt key
 * and HMAC precomputes, then configures the FW request header and the
 * cipher/auth CD control words (HASH then CIPHER slice order, compare
 * auth result rather than return it).
 *
 * Returns 0 on success or -EFAULT if the HMAC precompute fails.
 */
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
				    int alg,
				    struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	/* Cipher block sits after the auth setup plus two 8-byte-rounded
	 * digest-sized state areas (inner + outer HMAC state).
	 * NOTE(review): this offset mixes roundup() here with round_up()
	 * below -- both round to 8 in-kernel, but confirm intentional. */
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);

	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	/* Decrypt: hardware compares the auth result, does not return it. */
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
/*
 * _forecastAndProcess() - single scheduling step for the wrapped GNU Radio
 * block: size an output batch from the data available on the input
 * streams, ask the block's forecast() whether that batch is feasible
 * (shrinking it until it is), call general_work(), and push the produced
 * samples downstream with appropriate time stamps.
 *
 * Returns the number of items produced by general_work(), -1 when not
 * enough input data is available yet, or -2 when an expected input
 * stream is missing.  Sets @eos when the block signals a stop condition.
 */
template < typename IN_PORT_TYPE, typename OUT_PORT_TYPE >
int throttle_cc_base::_forecastAndProcess( bool &eos,
                                           typename std::vector< gr_istream< IN_PORT_TYPE > > &istreams,
                                           typename std::vector< gr_ostream< OUT_PORT_TYPE > > &ostreams )
{
  typedef typename std::vector< gr_istream< IN_PORT_TYPE > > _IStreamList;
  typedef typename std::vector< gr_ostream< OUT_PORT_TYPE > > _OStreamList;
  typename _OStreamList::iterator ostream;
  typename _IStreamList::iterator istream = istreams.begin();
  int nout = 0;
  bool dataReady = false;

  if ( !eos ) {
    // Largest item count available on any input stream.
    uint64_t max_items_avail = 0;
    for ( int idx=0 ; istream != istreams.end() && serviceThread->threadRunning() ; idx++, istream++ ) {
      LOG_TRACE( throttle_cc_base, "GET MAX ITEMS: STREAM:"<< idx << " NITEMS/SCALARS:" << istream->nitems() << "/" << istream->_data.size() );
      max_items_avail = std::max( istream->nitems(), max_items_avail );
    }

    if ( max_items_avail == 0 ) {
      LOG_TRACE( throttle_cc_base, "DATA CHECK - MAX ITEMS  NOUTPUT/MAX_ITEMS:" << noutput_items << "/" << max_items_avail);
      return -1;
    }

    //
    // calc number of output elements based on input items available
    //
    noutput_items = 0;
    if ( !gr_sptr->fixed_rate() ) {
      // Variable-rate block: scale by relative_rate, snap down to the
      // block's output multiple.
      noutput_items = round_down((int32_t) (max_items_avail * gr_sptr->relative_rate()), gr_sptr->output_multiple());
      LOG_TRACE( throttle_cc_base, " VARIABLE FORECAST NOUTPUT == " << noutput_items );
    } else {
      // Fixed-rate block: take the smallest positive per-stream estimate.
      istream = istreams.begin();
      for ( int i=0; istream != istreams.end(); i++, istream++ ) {
        int t_noutput_items = gr_sptr->fixed_rate_ninput_to_noutput( istream->nitems() );
        if ( gr_sptr->output_multiple_set() ) {
          t_noutput_items = round_up(t_noutput_items, gr_sptr->output_multiple());
        }
        if ( t_noutput_items > 0 ) {
          if ( noutput_items == 0 ) {
            noutput_items = t_noutput_items;
          }
          if ( t_noutput_items <= noutput_items ) {
            noutput_items = t_noutput_items;
          }
        }
      }
      LOG_TRACE( throttle_cc_base, " FIXED FORECAST NOUTPUT/output_multiple == " << noutput_items << "/" << gr_sptr->output_multiple());
    }

    //
    // ask the block how much input they need to produce noutput_items...
    // if enough data is available to process then set the dataReady flag
    //
    int32_t outMultiple = gr_sptr->output_multiple();
    while ( !dataReady && noutput_items >= outMultiple ) {
      //
      // ask the block how much input they need to produce noutput_items...
      //
      gr_sptr->forecast(noutput_items, _ninput_items_required);
      LOG_TRACE( throttle_cc_base, "--> FORECAST IN/OUT " << _ninput_items_required[0] << "/" << noutput_items );

      istream = istreams.begin();
      uint32_t dr_cnt=0;
      for ( int idx=0 ; noutput_items > 0 && istream != istreams.end(); idx++, istream++ ) {
        // check if buffer has enough elements
        _input_ready[idx] = false;
        if ( istream->nitems() >= (uint64_t)_ninput_items_required[idx] ) {
          _input_ready[idx] = true;
          dr_cnt++;
        }
        LOG_TRACE( throttle_cc_base, "ISTREAM DATACHECK NELMS/NITEMS/REQ/READY:" << istream->nelems() << "/" << istream->nitems() << "/" << _ninput_items_required[idx] << "/" << _input_ready[idx]);
      }

      // Not every stream is ready: shrink the request and retry.
      if ( dr_cnt < istreams.size() ) {
        if ( outMultiple > 1 ) {
          noutput_items -= outMultiple;
        } else {
          noutput_items /= 2;
        }
      } else {
        dataReady = true;
      }
      LOG_TRACE( throttle_cc_base, " TRIM FORECAST NOUTPUT/READY " << noutput_items << "/" << dataReady );
    }

    // check if data is ready...
    if ( !dataReady ) {
      LOG_TRACE( throttle_cc_base, "DATA CHECK - NOT ENOUGH DATA  AVAIL/REQ:" << _istreams[0].nitems() << "/" << _ninput_items_required[0] );
      return -1;
    }

    // reset looping variables
    int ritems = 0;
    int nitems = 0;

    // reset caching vectors
    _output_items.clear();
    _input_items.clear();
    _ninput_items.clear();

    istream = istreams.begin();
    for ( int idx=0 ; istream != istreams.end(); idx++, istream++ ) {
      // check if the stream is ready
      if ( !_input_ready[idx] ) {
        continue;
      }
      // get number of items remaining
      try {
        ritems = gr_sptr->nitems_read( idx );
      } catch(...){
        // something bad has happened, we are missing an input stream
        LOG_ERROR( throttle_cc_base, "MISSING INPUT STREAM FOR GR BLOCK, STREAM ID:" << istream->streamID );
        return -2;
      }

      nitems = istream->nitems() - ritems;
      LOG_TRACE( throttle_cc_base, " ISTREAM: IDX:" << idx << " ITEMS AVAIL/READ/REQ " << nitems << "/" << ritems << "/" << _ninput_items_required[idx] );
      if ( nitems >= _ninput_items_required[idx] && nitems > 0 ) {
        //remove eos checks ...if ( nitems < _ninput_items_required[idx] ) nitems=0;
        _ninput_items.push_back( nitems );
        _input_items.push_back( (const void *) (istream->read_pointer(ritems)) );
      }
    }

    //
    // setup output buffer vector based on noutput..
    //
    ostream = ostreams.begin();
    for( ; ostream != ostreams.end(); ostream++ ) {
      ostream->resize(noutput_items);
      _output_items.push_back((void*)(ostream->write_pointer()) );
    }

    nout=0;
    if ( _input_items.size() != 0 && serviceThread->threadRunning() ) {
      LOG_TRACE( throttle_cc_base, " CALLING WORK.....N_OUT:" << noutput_items << " N_IN:" << nitems << " ISTREAMS:" << _input_items.size() << " OSTREAMS:" << _output_items.size());
      nout = gr_sptr->general_work( noutput_items, _ninput_items, _input_items, _output_items);
      LOG_TRACE( throttle_cc_base, "RETURN  WORK ..... N_OUT:" << nout);
    }

    // check for stop condition from work method
    if ( nout < gr_block::WORK_DONE ) {
      LOG_WARN( throttle_cc_base, "WORK RETURNED STOP CONDITION..." << nout );
      nout=0;
      eos = true;
    }
  }

  // Push the produced items downstream (also runs on EOS to flush).
  if (nout != 0 or eos ) {
    noutput_items = nout;
    LOG_TRACE( throttle_cc_base, " WORK RETURNED: NOUT : " << nout << " EOS:" << eos);

    ostream = ostreams.begin();
    typename IN_PORT_TYPE::dataTransfer *pkt=NULL;
    for ( int idx=0 ; ostream != ostreams.end(); idx++, ostream++ ) {
      pkt=NULL;
      int inputIdx = idx;
      // If there is no matching input stream, borrow the last input
      // stream that still holds a packet (for its time stamp).
      if ( (size_t)(inputIdx) >= istreams.size() ) {
        for ( inputIdx= istreams.size()-1; inputIdx > -1; inputIdx--) {
          if ( istreams[inputIdx].pkt != NULL ) {
            pkt = istreams[inputIdx].pkt;
            break;
          }
        }
      } else {
        pkt = istreams[inputIdx].pkt;
      }

      LOG_TRACE( throttle_cc_base, "PUSHING DATA   ITEMS/STREAM_ID " << ostream->nitems() << "/" << ostream->streamID );
      if ( _maintainTimeStamp ) {
        // set time stamp for output samples based on input time stamp
        if ( ostream->nelems() == 0 ) {
#ifdef TEST_TIME_STAMP
          LOG_DEBUG( throttle_cc_base, "SEED - TS SRI:  xdelta:" << std::setprecision(12) << ostream->sri.xdelta );
          LOG_DEBUG( throttle_cc_base, "OSTREAM WRITE:   maint:" << _maintainTimeStamp );
          LOG_DEBUG( throttle_cc_base, "                  mode:" << ostream->tstamp.tcmode );
          LOG_DEBUG( throttle_cc_base, "                status:" << ostream->tstamp.tcstatus );
          LOG_DEBUG( throttle_cc_base, "                offset:" << ostream->tstamp.toff );
          LOG_DEBUG( throttle_cc_base, "                 whole:" << std::setprecision(10) << ostream->tstamp.twsec );
          LOG_DEBUG( throttle_cc_base, "SEED - TS      frac:" << std::setprecision(12) << ostream->tstamp.tfsec );
#endif
          ostream->setTimeStamp( pkt->T, _maintainTimeStamp );
        }

        // write out samples, and set next time stamp based on xdelta and noutput_items
        ostream->write ( noutput_items, eos );
      } else {
        // use incoming packet's time stamp to forward
        if ( pkt ) {
#ifdef TEST_TIME_STAMP
          LOG_DEBUG( throttle_cc_base, "OSTREAM SRI:  items/xdelta:" << noutput_items << "/" << std::setprecision(12) << ostream->sri.xdelta );
          LOG_DEBUG( throttle_cc_base, "PKT - TS         maint:" << _maintainTimeStamp );
          LOG_DEBUG( throttle_cc_base, "                  mode:" << pkt->T.tcmode );
          LOG_DEBUG( throttle_cc_base, "                status:" << pkt->T.tcstatus );
          LOG_DEBUG( throttle_cc_base, "                offset:" << pkt->T.toff );
          LOG_DEBUG( throttle_cc_base, "                 whole:" << std::setprecision(10) << pkt->T.twsec );
          LOG_DEBUG( throttle_cc_base, "PKT - TS          frac:" << std::setprecision(12) << pkt->T.tfsec );
#endif
          ostream->write( noutput_items, eos, pkt->T );
        } else {
#ifdef TEST_TIME_STAMP
          LOG_DEBUG( throttle_cc_base, "OSTREAM SRI:  items/xdelta:" << noutput_items << "/" << std::setprecision(12) << ostream->sri.xdelta );
          LOG_DEBUG( throttle_cc_base, "OSTREAM TOD      maint:" << _maintainTimeStamp );
          LOG_DEBUG( throttle_cc_base, "                  mode:" << ostream->tstamp.tcmode );
          LOG_DEBUG( throttle_cc_base, "                status:" << ostream->tstamp.tcstatus );
          LOG_DEBUG( throttle_cc_base, "                offset:" << ostream->tstamp.toff );
          LOG_DEBUG( throttle_cc_base, "                 whole:" << std::setprecision(10) << ostream->tstamp.twsec );
          LOG_DEBUG( throttle_cc_base, "OSTREAM TOD       frac:" << std::setprecision(12) << ostream->tstamp.tfsec );
#endif
          // use time of day as time stamp
          ostream->write( noutput_items, eos, _maintainTimeStamp );
        }
      }
    } // for ostreams
  }

  return nout;
}
/* * Get a layout for the pNFS client. */ int xfs_fs_map_blocks( struct inode *inode, loff_t offset, u64 length, struct iomap *iomap, bool write, u32 *device_generation) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; struct xfs_bmbt_irec imap; xfs_fileoff_t offset_fsb, end_fsb; loff_t limit; int bmapi_flags = XFS_BMAPI_ENTIRE; int nimaps = 1; uint lock_flags; int error = 0; if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; /* * We can't export inodes residing on the realtime device. The realtime * device doesn't have a UUID to identify it, so the client has no way * to find it. */ if (XFS_IS_REALTIME_INODE(ip)) return -ENXIO; /* * Lock out any other I/O before we flush and invalidate the pagecache, * and then hand out a layout to the remote system. This is very * similar to direct I/O, except that the synchronization is much more * complicated. See the comment near xfs_break_layouts for a detailed * explanation. */ xfs_ilock(ip, XFS_IOLOCK_EXCL); error = -EINVAL; limit = mp->m_super->s_maxbytes; if (!write) limit = max(limit, round_up(i_size_read(inode), inode->i_sb->s_blocksize)); if (offset > limit) goto out_unlock; if (offset > limit - length) length = limit - offset; error = filemap_write_and_wait(inode->i_mapping); if (error) goto out_unlock; error = invalidate_inode_pages2(inode->i_mapping); if (WARN_ON_ONCE(error)) return error; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length); offset_fsb = XFS_B_TO_FSBT(mp, offset); lock_flags = xfs_ilock_data_map_shared(ip); error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, &nimaps, bmapi_flags); xfs_iunlock(ip, lock_flags); if (error) goto out_unlock; if (write) { enum xfs_prealloc_flags flags = 0; ASSERT(imap.br_startblock != DELAYSTARTBLOCK); if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) { /* * xfs_iomap_write_direct() expects to take ownership of * the shared ilock. 
*/ xfs_ilock(ip, XFS_ILOCK_SHARED); error = xfs_iomap_write_direct(ip, offset, length, &imap, nimaps); if (error) goto out_unlock; /* * Ensure the next transaction is committed * synchronously so that the blocks allocated and * handed out to the client are guaranteed to be * present even after a server crash. */ flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC; } error = xfs_update_prealloc_flags(ip, flags); if (error) goto out_unlock; } xfs_iunlock(ip, XFS_IOLOCK_EXCL); xfs_bmbt_to_iomap(ip, iomap, &imap); *device_generation = mp->m_generation; return error; out_unlock: xfs_iunlock(ip, XFS_IOLOCK_EXCL); return error; }