/* * Winbind keeps wbcDomainSid fields in host-endian. Copy fields from the * wsid to the csid, while converting the subauthority fields to LE. */ static void wsid_to_csid(struct cifs_sid *csid, struct wbcDomainSid *wsid) { int i; uint8_t num_subauth = (wsid->num_auths <= SID_MAX_SUB_AUTHORITIES) ? wsid->num_auths : SID_MAX_SUB_AUTHORITIES; csid->revision = wsid->sid_rev_num; csid->num_subauth = num_subauth; for (i = 0; i < NUM_AUTHS; i++) csid->authority[i] = wsid->id_auth[i]; for (i = 0; i < num_subauth; i++) csid->sub_auth[i] = htole32(wsid->sub_auths[i]); }
/*
 * Send one length-prefixed message over a unix socket.
 *
 * Wire format: a 4-byte little-endian payload length followed by the
 * payload (no NUL terminator).  Each write is gated by wait_write_select()
 * with the caller's timeout.  On exit, the write side of the socket is
 * shut down regardless of success, signalling end-of-message to the peer.
 *
 * file/func/line identify the call site for logging (callers normally go
 * through a send_unix_msg() wrapper macro that supplies them).
 *
 * Returns true on success, false on any failure (already logged).
 */
bool _send_unix_msg(int sockd, const char *buf, int timeout, const char *file,
		    const char *func, const int line)
{
	uint32_t msglen, len;
	bool retval = false;
	int ret, ern;

	if (unlikely(sockd < 0)) {
		LOGWARNING("Attempting to send unix message to invalidated sockd %d", sockd);
		goto out;
	}
	if (unlikely(!buf)) {
		LOGWARNING("Null message sent to send_unix_msg");
		goto out;
	}
	len = strlen(buf);
	if (unlikely(!len)) {
		LOGWARNING("Zero length message sent to send_unix_msg");
		goto out;
	}
	/* The length prefix is always little-endian on the wire. */
	msglen = htole32(len);
	ret = wait_write_select(sockd, timeout);
	if (unlikely(ret < 1)) {
		ern = errno;
		LOGERR("Select1 failed in send_unix_msg (%d)", ern);
		goto out;
	}
	ret = _write_length(sockd, &msglen, 4, file, func, line);
	if (unlikely(ret < 4)) {
		LOGERR("Failed to write 4 byte length in send_unix_msg");
		goto out;
	}
	ret = wait_write_select(sockd, timeout);
	if (unlikely(ret < 1)) {
		ern = errno;
		LOGERR("Select2 failed in send_unix_msg (%d)", ern);
		goto out;
	}
	ret = _write_length(sockd, buf, len, file, func, line);
	if (unlikely(ret < 0)) {
		LOGERR("Failed to write %d bytes in send_unix_msg", len);
		goto out;
	}
	retval = true;
out:
	/* Always close the write side, even on failure. */
	shutdown(sockd, SHUT_WR);
	if (unlikely(!retval))
		LOGERR("Failure in send_unix_msg from %s %s:%d", file, func, line);
	return retval;
}
/*
 * Compress executable and output it in relocatable object format.
 *
 * kh describes the image being compressed; f1 is the input executable,
 * f2 the output object file.  mk_data() writes the compressed payload at
 * an offset that leaves room for the container header (a.out or ELF) plus
 * the kgz header; those headers are then patched and written, and finally
 * the kgz header itself is emitted in little-endian form (khle).
 */
void kgzcmp(struct kgz_hdr *kh, const char *f1, const char *f2)
{
    struct iodesc idi, ido;
    struct kgz_hdr khle;

    if ((idi.fd = open(idi.fname = f1, O_RDONLY)) == -1)
        err(1, "%s", idi.fname);
    if ((ido.fd = open(ido.fname = f2, O_CREAT | O_TRUNC | O_WRONLY,
        0666)) == -1)
        err(1, "%s", ido.fname);
    kh->ident[0] = KGZ_ID0;
    kh->ident[1] = KGZ_ID1;
    kh->ident[2] = KGZ_ID2;
    kh->ident[3] = KGZ_ID3;
    /* Compressed data goes after the container header + kgz header. */
    mk_data(&idi, &ido, kh,
        (format == F_AOUT ? sizeof(struct kgz_aouthdr0)
                          : sizeof(struct kgz_elfhdr)) +
        sizeof(struct kgz_hdr));
    /* Addresses are truncated to 24 bits. */
    kh->dload &= 0xffffff;
    kh->entry &= 0xffffff;
    if (format == F_AOUT) {
        struct kgz_aouthdr0 ahdr0 = aouthdr0;
        struct kgz_aouthdr1 ahdr1 = aouthdr1;
        /* Pad the data so header+payload ends on a 16-byte boundary. */
        unsigned x = (sizeof(struct kgz_hdr) + kh->nsize) & (16 - 1);
        if (x) {
            x = 16 - x;
            xzero(&ido, x);
        }
        xwrite(&ido, &ahdr1, sizeof(ahdr1));
        /* Account for the payload and padding in the a.out data size,
         * then rewrite the leading header in place. */
        ahdr0.a.a_data += kh->nsize + x;
        xseek(&ido, 0);
        xwrite(&ido, &ahdr0, sizeof(ahdr0));
    } else {
        struct kgz_elfhdr ehdr = elfhdr;
        /* Patch the symbol and .data section sizes for the payload;
         * stored fields are little-endian, so convert around the add. */
        ehdr.st[KGZ_ST_KGZ_NDATA].st_size = htole32(kh->nsize);
        ehdr.sh[KGZ_SH_DATA].sh_size =
            htole32(le32toh(ehdr.sh[KGZ_SH_DATA].sh_size) + kh->nsize);
        xseek(&ido, 0);
        xwrite(&ido, &ehdr, sizeof(ehdr));
    }
    /* Emit the kgz header in little-endian byte order. */
    khle = *kh;
    khle.dload = htole32(khle.dload);
    khle.dsize = htole32(khle.dsize);
    khle.isize = htole32(khle.isize);
    khle.entry = htole32(khle.entry);
    khle.nsize = htole32(khle.nsize);
    xwrite(&ido, &khle, sizeof(khle));
    xclose(&ido);
    xclose(&idi);
}
/*
 * Scalar 32-bit Murmur3 hash over an array of uint32_t words.
 * data points at len words (NOTE(review): len appears to be a word
 * count, not bytes — the finalise step scales by sizeof(uint32_t);
 * confirm against callers).  Input words are mixed in little-endian
 * order so the hash is byte-order independent.
 */
inline uint32_t
rgph_u32_murmur32s_data32(const void *data, size_t len, uint32_t seed)
{
	const uint32_t *words = data;
	uint32_t state[1] = { seed };
	size_t i;

	for (i = 0; i < len; i++)
		rgph_murmur32s_mix(htole32(words[i]), state, 0);

	/* No tail bytes for word-aligned input. */
	rgph_murmur32s_mix(0, state, 1);
	rgph_murmur32s_finalise(len * sizeof(words[0]), state);
	return state[0];
}
/*
 * Derive radiotap flags for a received frame from its RX descriptor:
 * nothing when the short-preamble/GI bit is clear, otherwise short
 * preamble for CCK rates and short GI for everything else.
 */
uint8_t
r12a_rx_radiotap_flags(const void *buf)
{
	const struct r92c_rx_stat *stat = buf;
	uint8_t rate;

	if ((stat->rxdw4 & htole32(R12A_RXDW4_SPLCP)) == 0)
		return (0);

	rate = MS(le32toh(stat->rxdw3), R92C_RXDW3_RATE);
	return (RTWN_RATE_IS_CCK(rate) ?
	    IEEE80211_RADIOTAP_F_SHORTPRE : IEEE80211_RADIOTAP_F_SHORTGI);
}
/*
 * Build and submit a Create I/O Completion Queue admin command for
 * io_que, bound to interrupt vector `vector`.  cb_fn/cb_arg are invoked
 * when the command completes.
 */
void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	cmd = &req->cmd;
	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_CREATE_IO_CQ);

	/*
	 * TODO: create a create io completion queue command data
	 * structure.
	 */
	/* CDW10: queue size (zero-based) in the high word, queue id low. */
	cmd->cdw10 = htole32(((io_que->num_entries-1) << 16) | io_que->id);
	/* 0x3 = interrupts enabled | physically contiguous */
	cmd->cdw11 = htole32((vector << 16) | 0x3);
	/* PRP1 points at the completion queue's bus address. */
	cmd->prp1 = htole64(io_que->cpl_bus_addr);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
/*
 * Add an internal (driver-managed) station to the firmware via the
 * ADD_STA command.  addr may be NULL when the station has no MAC
 * address (e.g. auxiliary stations).  TX is disabled on all TIDs.
 *
 * Returns 0 on success, the send error, or EIO if firmware rejected
 * the station.
 */
static int
iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
    const uint8_t *addr, uint16_t mac_id, uint16_t color)
{
	struct iwm_mvm_add_sta_cmd cmd;
	int ret;
	uint32_t status;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
	/* Disable TX aggregation on every TID. */
	cmd.tid_disable_tx = htole16(0xffff);

	if (addr)
		IEEE80211_ADDR_COPY(cmd.addr, addr);

	/* Command size varies with firmware API version. */
	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
	    iwm_mvm_add_sta_cmd_size(sc), &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWM_ADD_STA_STATUS_MASK) {
	case IWM_ADD_STA_SUCCESS:
		IWM_DPRINTF(sc, IWM_DEBUG_NODE, "Internal station added.\n");
		return 0;
	default:
		ret = EIO;
		device_printf(sc->sc_dev,
		    "Add internal station failed, status=0x%x\n", status);
		break;
	}
	return ret;
}
/*
 * Write an RNDIS packet-message header into the buffer m for a payload
 * of len bytes.  Returns the header size written.
 */
long urndis_encap(struct NepClassEth *ncp, BYTE *m, LONG len)
{
	struct urndis_packet_msg *msg;

	msg = (struct urndis_packet_msg *)m;

	memset(msg, 0, sizeof(*msg));

	/* All header fields are little-endian on the wire. */
	msg->rm_type = htole32(REMOTE_NDIS_PACKET_MSG);
	msg->rm_len = htole32(sizeof(*msg) + len);
	msg->rm_dataoffset = htole32(RNDIS_DATA_OFFSET);
	msg->rm_datalen = htole32(len);

	/* NOTE(review): the payload copy is commented out — presumably the
	 * caller places the frame data after the header itself; confirm. */
	//m_copydata(m, 0, len,((char*)msg + RNDIS_DATA_OFFSET + RNDIS_HEADER_OFFSET));

	DB(bug("%s: urndis_encap type 0x%x len %u data(off %u len %u)\n",
	    DEVNAME,
	    letoh32(msg->rm_type),
	    letoh32(msg->rm_len),
	    letoh32(msg->rm_dataoffset),
	    letoh32(msg->rm_datalen)));

	return(sizeof(*msg));
}
/*
 * Fill DBDMA descriptor `slot` on channel `chan`.
 *
 * For branching commands, cmdDep holds the physical address of the
 * branch-target descriptor; LOAD/STORE_QUAD instead use it as a raw
 * value.  The descriptor is assembled in host order and byte-swapped
 * to little-endian before being stored into the channel's slot array.
 */
void
dbdma_insert_command(dbdma_channel_t *chan, int slot, int command, int stream,
    bus_addr_t data, size_t count, uint8_t interrupt, uint8_t branch,
    uint8_t wait, uint32_t branch_slot)
{
	struct dbdma_command cmd;
	uint32_t *flip;

	cmd.cmd = command;
	cmd.key = stream;
	cmd.intr = interrupt;
	cmd.branch = branch;
	cmd.wait = wait;

	cmd.reqCount = count;
	cmd.address = (uint32_t)(data);
	if (command != DBDMA_STORE_QUAD && command != DBDMA_LOAD_QUAD)
		cmd.cmdDep = chan->sc_slots_pa +
		    branch_slot * sizeof(struct dbdma_command);
	else
		cmd.cmdDep = branch_slot;

	/* Result fields are filled in by hardware. */
	cmd.resCount = 0;
	cmd.xferStatus = 0;

	/*
	 * Move quadwords to little-endian. God only knows why
	 * Apple thought this was a good idea.
	 */
	flip = (uint32_t *)(&cmd);
	flip[0] = htole32(flip[0]);
	flip[1] = htole32(flip[1]);
	flip[2] = htole32(flip[2]);

	chan->sc_slots[slot] = cmd;
}
/*
 * Build and submit an NVMe Abort admin command for command `cid` on
 * submission queue `sqid`.  cb_fn/cb_arg are invoked on completion.
 */
void
nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
    uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	cmd = &req->cmd;
	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_ABORT);
	/*
	 * CDW10: command id in the high word, SQ id in the low word.
	 * Widen cid before shifting: a uint16_t promotes to int, and
	 * shifting a value >= 0x8000 left by 16 would overflow into the
	 * sign bit (undefined behavior).
	 */
	cmd->cdw10 = htole32(((uint32_t)cid << 16) | sqid);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
/*
 * Program the acx111 firmware memory layout (station count, memory-block
 * carving, RX/TX ring configuration), read back the resulting memory
 * info and initialize the firmware TX descriptor ring.
 *
 * Returns 0 on success, 1 on failure.
 */
int
acx111_init_memory(struct acx_softc *sc)
{
	struct acx111_conf_mem mem;
	struct acx111_conf_meminfo mem_info;
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	/* Set memory configuration */
	bzero(&mem, sizeof(mem));
	/* Multi-byte fields are little-endian in the config record. */
	mem.sta_max = htole16(ACX111_STA_MAX);
	mem.memblk_size = htole16(ACX_MEMBLOCK_SIZE);
	mem.rx_memblk_perc = ACX111_RX_MEMBLK_PERCENT;
	mem.opt = ACX111_MEMOPT_DEFAULT;
	mem.xfer_perc = ACX111_XFER_PERCENT;
	mem.fw_rxring_num = 1;
	mem.fw_rxring_type = ACX111_RXRING_TYPE_DEFAULT;
	mem.fw_rxring_prio = ACX111_RXRING_PRIO_DEFAULT;
	mem.fw_rxdesc_num = ACX_RX_DESC_CNT;
	mem.h_rxring_paddr = htole32(sc->sc_ring_data.rx_ring_paddr);
	mem.fw_txring_num = 1;
	mem.fw_txring_attr = ACX111_TXRING_ATTR_DEFAULT;
	mem.fw_txdesc_num = ACX_TX_DESC_CNT;
	if (acx_set_conf(sc, ACX111_CONF_MEM, &mem, sizeof(mem)) != 0) {
		printf("%s: can't set mem\n", ifp->if_xname);
		return (1);
	}

	/* Get memory configuration */
	if (acx_get_conf(sc, ACX111_CONF_MEMINFO, &mem_info,
	    sizeof(mem_info)) != 0) {
		printf("%s: can't get meminfo\n", ifp->if_xname);
		return (1);
	}

	/* Setup firmware TX descriptor ring */
	acx111_init_fw_txring(sc, letoh32(mem_info.fw_txring_start));

	/*
	 * There is no need to setup firmware RX descriptor ring,
	 * it is automatically set up by hardware.
	 */
	return (0);
}
/*
 * Write a.out or ELF header.
 *
 * Patches a template header with the text/data sizes and entry point
 * from hdr, writes it at the start of fd, and records the header size
 * (and, for a.out, seeks past the aligned header) in hdr->size.
 */
static void puthdr(int fd, struct hdr *hdr)
{
    struct exec ex;
    struct elfh eh;

    switch (hdr->fmt) {
    case F_AOUT:
        memset(&ex, 0, sizeof(ex));
        N_SETMAGIC(ex, ZMAGIC, MID_I386, 0);
        /* Segment sizes are rounded to the a.out alignment; on-disk
         * fields are little-endian. */
        hdr->text = N_ALIGN(ex, hdr->text);
        ex.a_text = htole32(hdr->text);
        hdr->data = N_ALIGN(ex, hdr->data);
        ex.a_data = htole32(hdr->data);
        ex.a_entry = htole32(hdr->entry);
        writex(fd, &ex, sizeof(ex));
        hdr->size = N_ALIGN(ex, sizeof(ex));
        seekx(fd, hdr->size);
        break;
    case F_ELF:
        eh = elfhdr;
        eh.e.e_entry = htole32(hdr->entry);
        /* Program header 0: text segment at hdr->org. */
        eh.p[0].p_vaddr = eh.p[0].p_paddr = htole32(hdr->org);
        eh.p[0].p_filesz = eh.p[0].p_memsz = htole32(hdr->text);
        /* Program header 1: data segment follows text in the file,
         * page-aligned in memory.  Template fields are already LE, so
         * convert around the arithmetic. */
        eh.p[1].p_offset = htole32(le32toh(eh.p[0].p_offset) +
            le32toh(eh.p[0].p_filesz));
        eh.p[1].p_vaddr = eh.p[1].p_paddr =
            htole32(align(le32toh(eh.p[0].p_paddr) +
                le32toh(eh.p[0].p_memsz), 4096));
        eh.p[1].p_filesz = eh.p[1].p_memsz = htole32(hdr->data);
        /* Mirror the segment layout into the section headers; values
         * are copied verbatim (already little-endian). */
        eh.sh[2].sh_addr = eh.p[0].p_vaddr;
        eh.sh[2].sh_offset = eh.p[0].p_offset;
        eh.sh[2].sh_size = eh.p[0].p_filesz;
        eh.sh[3].sh_addr = eh.p[1].p_vaddr;
        eh.sh[3].sh_offset = eh.p[1].p_offset;
        eh.sh[3].sh_size = eh.p[1].p_filesz;
        writex(fd, &eh, sizeof(eh));
        hdr->size = sizeof(eh);
        /* No default case: hdr->fmt is only F_AOUT or F_ELF. */
    }
}
/* 'offset' must point to a valid, used block.  This function marks
 * the block unused (by updating the seg_len field) and invalidates
 * the bitmap.  It does NOT do this recursively, so to avoid creating
 * unreachable used blocks, callers may have to recurse over the hive
 * structures.  Also callers must ensure there are no references to
 * this block from other parts of the hive.
 */
static void
mark_block_unused (hive_h *h, size_t offset)
{
  assert (h->writable);
  assert (IS_VALID_BLOCK (h, offset));

  DEBUG (2, "marking 0x%zx unused", offset);

  struct ntreg_hbin_block *blockhdr =
    (struct ntreg_hbin_block *) ((char *) h->addr + offset);

  /* Rewrite seg_len with the block's length as returned by block_len();
   * given the contract above, storing this value is what flags the
   * block as free (presumably the sign of seg_len distinguishes
   * used/free — confirm against the hive format docs). */
  size_t seg_len = block_len (h, offset, NULL);
  blockhdr->seg_len = htole32 (seg_len);

  /* Clear the "used" bit for this offset. */
  BITMAP_CLR (h->bitmap, offset);
}
/*
 * Derive radiotap flags for a transmitted frame from its TX descriptor:
 * nothing when the short bit is clear, otherwise short preamble for CCK
 * rates and short GI for everything else.
 */
uint8_t
r12a_tx_radiotap_flags(const void *buf)
{
	const struct r12a_tx_desc *txd = buf;
	uint8_t rate;

	if ((txd->txdw5 & htole32(R12A_TXDW5_DATA_SHORT)) == 0)
		return (0);

	rate = MS(le32toh(txd->txdw4), R12A_TXDW4_DATARATE);
	return (RTWN_RATE_IS_CCK(rate) ?
	    IEEE80211_RADIOTAP_F_SHORTPRE : IEEE80211_RADIOTAP_F_SHORTGI);
}
/*
 * Emulator callback: store a 32-bit little-endian value at guest
 * address addr.
 */
static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);
	/* NOTE(review): control continues after x86bios_set_fault();
	 * presumably it halts/longjmps so va is never NULL below —
	 * confirm, otherwise this dereferences NULL. */

#ifndef __NO_STRICT_ALIGNMENT
	/* Byte-wise store when the guest address is misaligned. */
	if ((addr & 3) != 0)
		le32enc(va, val);
	else
#endif
	*va = htole32(val);
}
/*
 * Parse a queue-length report from the device.  The 32-bit queue length
 * sits at offset 8 of the report, little-endian on the wire.  Updates
 * the per-device queue bookkeeping under info->qlock.
 */
static void gridseed_get_queue_length(struct cgpu_info *gridseed,
		GRIDSEED_INFO *info, unsigned char *data)
{
	uint32_t qlen;

	memcpy(&qlen, data+8, 4);
	/*
	 * Decode from little-endian wire format.  The previous htole32()
	 * call produced the same bytes (the swap is symmetric) but stated
	 * the conversion in the wrong direction; le32toh is the correct
	 * idiom for reading device data.
	 */
	qlen = le32toh(qlen);

	mutex_lock(&info->qlock);
	info->query_qlen = false;
	info->dev_queue_len = GRIDSEED_MCU_QUEUE_LEN - qlen;
	info->needworks = qlen;
	cgtimer_time(&info->query_ts);
	mutex_unlock(&info->qlock);
	return;
}
/*++
 * t a p e W r i t e E O M
 *
 * Write an end-of-media record to the tape at its current position and,
 * optionally, backup the tape to before the newly written record.
 *
 * Inputs:
 *
 *      container       - pointer to open container file
 *      backup          - if 1, position the tape before the new record
 *
 * Outputs:
 *
 *      None
 *
 * Returns:
 *
 *      1 if EOM record successfully written, 0 otherwise
 *
 --*/
int tapeWriteEOM(
  FILE *container,
  int backup
)
{
  uint32_t eom = htole32(ST_EOM);

  if (fwrite(&eom, sizeof(eom), 1, container) != 1)
    return 0;

  if (backup) {
    /*
     * Negate after casting to off_t: -sizeof(eom) is computed in
     * unsigned size_t arithmetic, producing SIZE_MAX - 3 rather than
     * -4, and its conversion to off_t is implementation-defined.
     */
    if (fseeko(container, -(off_t)sizeof(eom), SEEK_CUR) != 0)
      return 0;
  }
  return 1;
}
/*
 * Self-test for the byte-order conversion macros at 16, 32 and 64 bits.
 * Each union spells out the expected byte sequences of the big- and
 * little-endian encodings of a fixed host value.
 */
int main(int argc, char *argv[])
{
	plan(12);

	/* 16-bit: 0x0102 is bytes 01 02 in BE, 02 01 in LE. */
	union { uint16_t value; uint8_t array[2]; } big16, little16;
	const uint16_t host16 = 0x0102;
	big16.array[0] = 0x01; big16.array[1] = 0x02;
	little16.array[0] = 0x02; little16.array[1] = 0x01;

	ok(htobe16(host16) == big16.value, "htobe16");
	ok(htole16(host16) == little16.value, "htole16");
	ok(be16toh(big16.value) == host16, "be16toh");
	ok(le16toh(little16.value) == host16, "le16toh");

	/* 32-bit round trips. */
	union { uint32_t value; uint8_t array[4]; } big32 =
		{ .array = { 0x01, 0x02, 0x03, 0x04 } };
	union { uint32_t value; uint8_t array[4]; } little32 =
		{ .array = { 0x04, 0x03, 0x02, 0x01 } };
	const uint32_t host32 = 0x01020304;

	ok(htobe32(host32) == big32.value, "htobe32");
	ok(htole32(host32) == little32.value, "htole32");
	ok(be32toh(big32.value) == host32, "be32toh");
	ok(le32toh(little32.value) == host32, "le32toh");

	/* 64-bit round trips. */
	union { uint64_t value; uint8_t array[8]; } big64 =
		{ .array = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 } };
	union { uint64_t value; uint8_t array[8]; } little64 =
		{ .array = { 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 } };
	const uint64_t host64 = 0x0102030405060708;

	ok(htobe64(host64) == big64.value, "htobe64");
	ok(htole64(host64) == little64.value, "htole64");
	ok(be64toh(big64.value) == host64, "be64toh");
	ok(le64toh(little64.value) == host64, "le64toh");

	return 0;
}
/*
 * Demonstrate host byte order: fill a 4-byte union in ascending address
 * order, then print the raw value alongside its little- and big-endian
 * conversions.
 */
int main(int argc, char *argv[])
{
	union {
		uint32_t u32;
		uint8_t arr[4];
	} x;
	int i;

	/* Bytes 0x11, 0x22, 0x33, 0x44 from lowest to highest address. */
	for (i = 0; i < 4; i++)
		x.arr[i] = (uint8_t)(0x11 * (i + 1));

	printf("x.u32 = 0x%x\n", x.u32);
	printf("htole32(x.u32) = 0x%x\n", htole32(x.u32));
	printf("htobe32(x.u32) = 0x%x\n", htobe32(x.u32));
	return 0;
}
/*
 * Rebuild a damaged GPT header (primary or secondary, selected by
 * `type`) by cloning its intact twin, fixing the self-describing LBA
 * fields, recomputing the CRC and writing it back out.
 *
 * Returns 0 on success, -1 on failure (warning already issued).
 */
static int
recover_gpt_hdr(gpt_t gpt, int type, off_t last)
{
	const char *name, *origname;
	map_t *dgpt, dtbl, sgpt, stbl __unused;
	struct gpt_hdr *hdr;

	if (gpt_add_hdr(gpt, type, last) == -1)
		return -1;

	/* Select destination (damaged) and source (intact) header/table
	 * maps according to which header we are recovering. */
	switch (type) {
	case MAP_TYPE_PRI_GPT_HDR:
		dgpt = &gpt->gpt;
		dtbl = gpt->tbl;
		sgpt = gpt->tpg;
		stbl = gpt->lbt;
		origname = "secondary";
		name = "primary";
		break;
	case MAP_TYPE_SEC_GPT_HDR:
		dgpt = &gpt->tpg;
		dtbl = gpt->lbt;
		sgpt = gpt->gpt;
		stbl = gpt->tbl;
		origname = "primary";
		name = "secondary";
		break;
	default:
		gpt_warn(gpt, "Bad table type %d", type);
		return -1;
	}

	/* Clone the intact header, then patch the LBAs that differ
	 * between the primary and secondary copies. */
	memcpy((*dgpt)->map_data, sgpt->map_data, gpt->secsz);
	hdr = (*dgpt)->map_data;
	hdr->hdr_lba_self = htole64((uint64_t)(*dgpt)->map_start);
	hdr->hdr_lba_alt = htole64((uint64_t)sgpt->map_start);
	hdr->hdr_lba_table = htole64((uint64_t)dtbl->map_start);
	/* Per the GPT spec, the CRC is computed with the crc field zeroed. */
	hdr->hdr_crc_self = 0;
	hdr->hdr_crc_self = htole32(crc32(hdr, le32toh(hdr->hdr_size)));

	if (gpt_write(gpt, *dgpt) == -1) {
		gpt_warnx(gpt, "Writing %s GPT header failed", name);
		return -1;
	}
	gpt_msg(gpt, "Recovered %s GPT header from %s", name, origname);
	return 0;
}
/*
 * Convert a DBG_HEADER value to little-endian.  The width of DBG_HEADER
 * is configuration-dependent, so the matching conversion is selected by
 * comparing sizes; the compiler folds this to a single branch.
 */
static DBG_HEADER adjust_header(DBG_HEADER header)
{
	if (sizeof(header) == sizeof(uint64_t))         /* constant condition */
		header = htole64(header);
	else if (sizeof(header) == sizeof(uint32_t))    /* constant condition */
		header = htole32(header);
	else if (sizeof(header) == sizeof(uint16_t))    /* constant condition */
		header = htole16(header);
	/* Any other width is passed through unchanged. */
	return header;
}
/* Create a completely new lh-record containing just the single node. */
static size_t
new_lh_record (hive_h *h, const char *name, hive_node_h node)
{
  static const char id[2] = { 'l', 'h' };
  /* Exactly one key, so the base struct size suffices. */
  size_t seg_len = sizeof (struct ntreg_lf_record);
  size_t offset = allocate_block (h, seg_len, id);
  if (offset == 0)
    return 0;

  struct ntreg_lf_record *lh =
    (struct ntreg_lf_record *) ((char *) h->addr + offset);
  lh->nr_keys = htole16 (1);
  /* Stored offsets are relative to the first hbin (file offset 0x1000). */
  lh->keys[0].offset = htole32 (node - 0x1000);
  /* "lh" records carry a hash of the key name alongside the offset. */
  calc_hash ("lh", name, lh->keys[0].hash);

  /* Returns the file offset of the new record, or 0 on allocation
   * failure. */
  return offset;
}
// Returns true when the asset directory for assetName contains a magic
// file whose leading bytes equal the unbundle magic header (stored
// little-endian on disk).
bool JniJSModulesUnbundle::isUnbundle(
    AAssetManager *assetManager,
    const std::string& assetName) {
  if (assetManager == nullptr) {
    return false;
  }

  const auto magicFilePath = jsModulesDir(assetName) + MAGIC_FILE_NAME;
  auto magicAsset = openAsset(assetManager, magicFilePath.c_str());
  if (!magicAsset) {
    return false;
  }

  magic_number_t header = 0;
  AAsset_read(magicAsset.get(), &header, sizeof(header));
  return header == htole32(MAGIC_FILE_HEADER);
}
/*
 * Enable or disable firmware beacon-abort for the given node by
 * re-sending the beacon filter command.  Beacon abort only applies
 * while beacon filtering is active, so this is a no-op otherwise.
 *
 * Returns 0 on success or when filtering is disabled, else the
 * command-send error.
 */
static int
iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
    int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	/* Fill in the CQM thresholds before sending. */
	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
}
/*
 * Populate the vendor extension section of the RX radiotap header with
 * per-frame diagnostics (per-chain RSSI, EVM, PHY error info) before
 * the frame is tapped.
 */
static void
ath_rx_tap_vendor(struct ifnet *ifp, struct mbuf *m,
    const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
	struct ath_softc *sc = ifp->if_softc;

	/* Fill in the extension bitmap */
	sc->sc_rx_th.wr_ext_bitmap = htole32(1 << ATH_RADIOTAP_VENDOR_HEADER);

	/* Fill in the vendor header */
	sc->sc_rx_th.wr_vh.vh_oui[0] = 0x7f;
	sc->sc_rx_th.wr_vh.vh_oui[1] = 0x03;
	sc->sc_rx_th.wr_vh.vh_oui[2] = 0x00;

	/* XXX what should this be? */
	sc->sc_rx_th.wr_vh.vh_sub_ns = 0;
	sc->sc_rx_th.wr_vh.vh_skip_len =
	    htole16(sizeof(struct ath_radiotap_vendor_hdr));

	/* General version info */
	sc->sc_rx_th.wr_v.vh_version = 1;

	sc->sc_rx_th.wr_v.vh_rx_chainmask = sc->sc_rxchainmask;

	/* rssi */
	sc->sc_rx_th.wr_v.rssi_ctl[0] = rs->rs_rssi_ctl[0];
	sc->sc_rx_th.wr_v.rssi_ctl[1] = rs->rs_rssi_ctl[1];
	sc->sc_rx_th.wr_v.rssi_ctl[2] = rs->rs_rssi_ctl[2];
	sc->sc_rx_th.wr_v.rssi_ext[0] = rs->rs_rssi_ext[0];
	sc->sc_rx_th.wr_v.rssi_ext[1] = rs->rs_rssi_ext[1];
	sc->sc_rx_th.wr_v.rssi_ext[2] = rs->rs_rssi_ext[2];

	/* evm */
	sc->sc_rx_th.wr_v.evm[0] = rs->rs_evm0;
	sc->sc_rx_th.wr_v.evm[1] = rs->rs_evm1;
	sc->sc_rx_th.wr_v.evm[2] = rs->rs_evm2;
	/* XXX TODO: extend this to include 3-stream EVM */

	/* phyerr info; 0xff means "no PHY error" */
	if (rs->rs_status & HAL_RXERR_PHY)
		sc->sc_rx_th.wr_v.vh_phyerr_code = rs->rs_phyerr;
	else
		sc->sc_rx_th.wr_v.vh_phyerr_code = 0xff;
	sc->sc_rx_th.wr_v.vh_rs_status = rs->rs_status;
	sc->sc_rx_th.wr_v.vh_rssi = rs->rs_rssi;
}
/*
 * Verify cuckaroo proofs-of-work read from stdin.
 *
 * Usage: -h <header string> -n <nonce>.  The header and nonce are packed
 * into a fixed-size buffer (nonce in the last 4 bytes, little-endian),
 * from which the siphash keys are derived.  Each " Solution" line on
 * stdin is followed by PROOFSIZE hex edge indices, which are verified
 * and, on success, hashed with blake2b to print the cycle hash.
 */
int main(int argc, char **argv)
{
  const char *header = "";
  int nonce = 0;
  int c;
  while ((c = getopt (argc, argv, "h:n:")) != -1) {
    switch (c) {
      case 'h':
        header = optarg;
        break;
      case 'n':
        nonce = atoi(optarg);
        break;
    }
  }
  /* Header bytes, zero padding, then the nonce in the last word (LE). */
  char headernonce[HEADERLEN];
  u32 hdrlen = strlen(header);
  memcpy(headernonce, header, hdrlen);
  memset(headernonce+hdrlen, 0, sizeof(headernonce)-hdrlen);
  ((u32 *)headernonce)[HEADERLEN/sizeof(u32)-1] = htole32(nonce);
  siphash_keys keys;
  setheader(headernonce, sizeof(headernonce), &keys);
  printf("nonce %d k0 k1 k2 k3 %llx %llx %llx %llx\n", nonce, keys.k0, keys.k1, keys.k2, keys.k3);
  printf("Verifying size %d proof for cuckaroo%d(\"%s\",%d)\n", PROOFSIZE, EDGEBITS, header, nonce);
  /* scanf returns 0 (no conversions) when the literal matched. */
  for (int nsols=0; scanf(" Solution") == 0; nsols++) {
    word_t nonces[PROOFSIZE];
    for (int n = 0; n < PROOFSIZE; n++) {
      uint64_t nonce;
      int nscan = scanf(" %" SCNx64, &nonce);
      assert(nscan == 1);
      nonces[n] = nonce;
    }
    int pow_rc = verify(nonces, &keys);
    if (pow_rc == POW_OK) {
      printf("Verified with cyclehash ");
      unsigned char cyclehash[32];
      blake2b((void *)cyclehash, sizeof(cyclehash), (const void *)nonces, sizeof(nonces), 0, 0);
      for (int i=0; i<32; i++)
        printf("%02x", cyclehash[i]);
      printf("\n");
    } else {
      printf("FAILED due to %s\n", errstr[pow_rc]);
    }
  }
  return 0;
}
static void net_send_conf_chan(int fd) { struct net_conf_chan nc; nc.proto.version = PROTO_VERSION; nc.proto.type = PROTO_CONF_CHAN; nc.do_change = conf.do_change_channel; nc.upper = conf.channel_max; nc.channel = conf.channel_idx; nc.width_ht40p = conf.channel_width; if (conf.channel_ht40plus) nc.width_ht40p |= NET_WIDTH_HT40PLUS; nc.dwell_time = htole32(conf.channel_time); net_write(fd, (unsigned char *)&nc, sizeof(nc)); }
/*
 * Write a 32-bit value at `offset` in the requested byte order.
 * Returns UV_ERR_OK on success; UV_ERR_GENERAL (via UV_DEBUG) for an
 * unrecognized endianness; propagates writeData() failures.
 */
uv_err_t UVDData::writeU32(uint32_t offset, uint32_t in, uint32_t endianness)
{
	uint32_t encoded;

	if (endianness == UVD_DATA_ENDIAN_BIG)
	{
		encoded = htobe32(in);
	}
	else if (endianness == UVD_DATA_ENDIAN_LITTLE)
	{
		encoded = htole32(in);
	}
	else
	{
		return UV_DEBUG(UV_ERR_GENERAL);
	}

	uv_assert_err_ret(writeData(offset, (const char *)&encoded, sizeof(encoded)));
	return UV_ERR_OK;
}
/*
 * Demonstrate host byte order: initialize a 4-byte union with a known
 * byte sequence and print the host value plus its little- and
 * big-endian conversions.
 */
int main(int argc, char *argv[])
{
	union {
		uint32_t u32;
		uint8_t arr[4];
	} probe = {
		/* arr[0] is the lowest address, arr[3] the highest. */
		.arr = { 0x11, 0x22, 0x33, 0x44 }
	};

	printf("x.u32 = 0x%x\n", probe.u32);
	printf("htole32(x.u32) = 0x%x\n", htole32(probe.u32));
	printf("htobe32(x.u32) = 0x%x\n", htobe32(probe.u32));
	exit(EXIT_SUCCESS);
}
/*
 * Marshal a TPM event field into i_logBuf: a 4-byte little-endian
 * eventSize followed by eventSize bytes of event data.
 *
 * Returns a pointer just past the marshalled bytes, or NULL when
 * eventSize exceeds MAX_TPM_LOG_MSG (nothing is written in that case).
 */
uint8_t* TPM_EVENT_FIELD_logMarshal(const TPM_EVENT_FIELD* val,
                                    uint8_t* i_logBuf)
{
    if (MAX_TPM_LOG_MSG < val->eventSize)
    {
        i_logBuf = NULL;
    }
    else
    {
        /*
         * i_logBuf is a byte pointer with no alignment guarantee, so
         * copy the length with memcpy rather than storing through a
         * uint32_t* (which risks an unaligned access and violates
         * strict aliasing).
         */
        uint32_t size_le = htole32(val->eventSize);
        memcpy(i_logBuf, &size_le, sizeof(size_le));
        i_logBuf += sizeof(uint32_t);
        memcpy(i_logBuf, val->event, val->eventSize);
        i_logBuf += val->eventSize;
    }
    return i_logBuf;
}