/*
 * Read one pcap packet (record header + payload) from the scatter/gather
 * iovec read cache.  Fast path: both header and payload lie entirely
 * within the current iovec slot (iov[iov_slot] at offset iov_off_rd);
 * otherwise fall back to the slow helpers that straddle slot boundaries
 * and refill the iovec array from fd via readv().
 * Returns total bytes consumed (header + payload) or a negative errno.
 * NOTE(review): relies on file-scope state iov/iov_slot/iov_off_rd — not
 * thread-safe without external serialization; confirm callers serialize.
 */
static ssize_t pcap_sg_read(int fd, pcap_pkthdr_t *phdr, enum pcap_type type,
			    uint8_t *packet, size_t len)
{
	ssize_t ret = 0;
	size_t hdrsize = pcap_get_hdr_length(phdr, type), hdrlen;

	if (likely(iov[iov_slot].iov_len - iov_off_rd >= hdrsize)) {
		/* Header fully available in the current slot */
		fmemcpy(&phdr->raw, iov[iov_slot].iov_base + iov_off_rd, hdrsize);
		iov_off_rd += hdrsize;
	} else {
		ret = __pcap_sg_inter_iov_hdr_read(fd, phdr, hdrsize);
		if (unlikely(ret < 0))
			return ret;
	}

	hdrlen = pcap_get_length(phdr, type);
	if (unlikely(hdrlen == 0 || hdrlen > len))
		return -EINVAL;	/* bogus or oversized capture length */

	if (likely(iov[iov_slot].iov_len - iov_off_rd >= hdrlen)) {
		/* Payload fully available in the current slot */
		fmemcpy(packet, iov[iov_slot].iov_base + iov_off_rd, hdrlen);
		iov_off_rd += hdrlen;
	} else {
		ret = __pcap_sg_inter_iov_data_read(fd, packet, hdrlen);
		if (unlikely(ret < 0))
			return ret;
	}

	return hdrsize + hdrlen;
}
/*
 * Slow path for pcap_sg_read(): the packet payload straddles the boundary
 * of the current iovec slot.  Copy what remains of the current slot,
 * advance to the next slot (refilling the whole iovec array from fd via
 * readv() when wrapping), then copy the remainder.
 * Returns hdrlen on success or -EIO when the refill read fails.
 */
static ssize_t __pcap_sg_inter_iov_data_read(int fd, uint8_t *packet, size_t hdrlen)
{
	int ret;
	size_t offset = 0;
	ssize_t remainder;

	/* Bytes still available in the current slot */
	offset = iov[iov_slot].iov_len - iov_off_rd;
	remainder = hdrlen - offset;
	if (remainder < 0)
		remainder = 0;

	/* If the clamp above fired, the fast path should have been taken
	 * instead — treat the inconsistency as a logic error. */
	bug_on(offset + remainder != hdrlen);

	fmemcpy(packet, iov[iov_slot].iov_base + iov_off_rd, offset);
	iov_off_rd = 0;
	iov_slot++;

	if (iov_slot == array_size(iov)) {
		/* Wrapped around: refill the whole cache from the file */
		iov_slot = 0;
		ret = readv(fd, iov, array_size(iov));
		if (unlikely(ret <= 0))
			return -EIO;
	}

	fmemcpy(packet + offset, iov[iov_slot].iov_base + iov_off_rd, remainder);
	iov_off_rd += remainder;

	return hdrlen;
}
/*
 * Read one pcap packet from the mmap'ed capture file under the global
 * spinlock.  Bounds-checks both the header and the payload against the
 * mapped size before copying.
 * Returns sizeof(*hdr) + caplen on success, -ENOMEM when the mapping is
 * exhausted, or -EINVAL on a bogus (zero or oversized) capture length.
 */
static ssize_t pcap_mmap_read_pcap_pkt(int fd, struct pcap_pkthdr *hdr,
				       uint8_t *packet, size_t len)
{
	ssize_t ret;

	spinlock_lock(&lock);

	/* Would the header run past the end of the mapping? */
	if (unlikely((off_t) (pcurr + sizeof(*hdr) - pstart) > map_size)) {
		spinlock_unlock(&lock);
		return -ENOMEM;
	}

	fmemcpy(hdr, pcurr, sizeof(*hdr));
	pcurr += sizeof(*hdr);

	/* Would the payload run past the end of the mapping? */
	if (unlikely((off_t) (pcurr + hdr->caplen - pstart) > map_size)) {
		ret = -ENOMEM;
		goto out_err;
	}

	if (unlikely(hdr->caplen == 0 || hdr->caplen > len)) {
		ret = -EINVAL;	/* Bogus packet */
		goto out_err;
	}

	fmemcpy(packet, pcurr, hdr->caplen);
	pcurr += hdr->caplen;

	spinlock_unlock(&lock);
	return sizeof(*hdr) + hdr->caplen;

out_err:
	spinlock_unlock(&lock);
	return ret;
}
/*
 * Slow path for pcap_sg_read(): the pcap record header straddles the
 * boundary of the current iovec slot.  Copy the first part from the
 * current slot, advance to the next (refilling the whole iovec array
 * from fd via readv() when wrapping), then copy the remainder.
 * Returns hdrsize on success or -EIO when the refill read fails.
 */
static ssize_t __pcap_sg_inter_iov_hdr_read(int fd, pcap_pkthdr_t *phdr, size_t hdrsize)
{
	int ret;
	size_t offset = 0;
	ssize_t remainder;

	/* Bytes still available in the current slot */
	offset = iov[iov_slot].iov_len - iov_off_rd;
	remainder = hdrsize - offset;
	if (remainder < 0)
		remainder = 0;

	bug_on(offset + remainder != hdrsize);

	fmemcpy(&phdr->raw, iov[iov_slot].iov_base + iov_off_rd, offset);
	iov_off_rd = 0;
	iov_slot++;

	if (iov_slot == array_size(iov)) {
		/* Wrapped around: refill the whole cache from the file */
		iov_slot = 0;
		ret = readv(fd, iov, array_size(iov));
		if (unlikely(ret <= 0))
			return -EIO;
	}

	/* BUGFIX: '&phdr->raw + offset' performed pointer arithmetic on a
	 * pointer-to-raw-header, scaling the offset by sizeof(phdr->raw)
	 * and writing far past the intended position; the remainder must
	 * land 'offset' *bytes* into the raw header. */
	fmemcpy((uint8_t *) &phdr->raw + offset,
		iov[iov_slot].iov_base + iov_off_rd, remainder);
	iov_off_rd += remainder;

	return hdrsize;
}
/*
 * Ramdisk request handler: service all queued block requests until the
 * queue is empty.  Each request's absolute sector offset is resolved to
 * a (segment, offset) pair by walking the ramdisk's linked segment list,
 * then one 1K block is copied between the request buffer and far memory.
 */
static void do_rd_request(void)
{
    register char *buff;
    rd_sector_t start;		/* absolute offset from start of device */
    seg_t segnum;		/* segment index; segment = rd_segment[segnum].segment */
    segext_t offset;		/* relative offset (from start of segment) */
    int target;

    while (1) {
	if (!CURRENT || CURRENT->rq_dev < 0)
	    return;

	INIT_REQUEST;

	if (CURRENT == NULL || CURRENT->rq_sector == (sector_t) - 1)
	    return;

	if (rd_initialised != 1) {
	    /* Device not set up yet: fail the request */
	    end_request(0, CURRENT->rq_dev);
	    continue;
	}
	start = (rd_sector_t) CURRENT->rq_sector;
	buff = CURRENT->rq_buffer;
	target = DEVICE_NR(CURRENT->rq_dev);
	debug2("RD: request target: %d, start: %ld\n", target, (long) start);
	if ((rd_info[target].flags != RD_BUSY) || (start >= rd_info[target].size)) {
	    debug4("RD: bad request on ram%d, flags: %d, size: %d, start: %d\n",
		   target, rd_info[target].flags, rd_info[target].size, start);
	    end_request(0, CURRENT->rq_dev);
	    continue;
	}
	offset = start;		/* offset from segment start */
	segnum = rd_info[target].index;	/* we want to know our starting index nr. */
	debug1("RD: request index = %d\n", segnum);
	/* Walk the segment chain until 'offset' falls inside a segment.
	 * NOTE(review): the '>' (rather than '>=') keeps offset ==
	 * seg_size inside the current segment — confirm seg_size counts
	 * sectors such that this boundary is intended. */
	while (offset > rd_segment[segnum].seg_size) {
	    offset -= rd_segment[segnum].seg_size;	/* recalculate offset */
	    segnum = rd_segment[segnum].next;	/* point to next segment in linked list */
	}
	debug5("RD: request entry = %d, segment = 0x%x, offset = %d (%x %x)\n",
	       segnum, rd_segment[segnum].segment, offset, CURRENT->rq_seg, buff);
	if (CURRENT->rq_cmd == WRITE) {
	    debug1("RD: request writing to %ld\n", (long) start);
	    /* Copy one 1K block from the requester into the ramdisk */
	    fmemcpy(rd_segment[segnum].segment, offset * SECTOR_SIZE,
		    CURRENT->rq_seg, buff, 1024);
	}
	if (CURRENT->rq_cmd == READ) {
	    debug1("RD_REQUEST reading from %ld\n", start);
	    /* Copy one 1K block from the ramdisk into the requester */
	    fmemcpy(CURRENT->rq_seg, buff, rd_segment[segnum].segment,
		    offset * SECTOR_SIZE, 1024);
	}
	end_request(1, CURRENT->rq_dev);
    }
}
void bpf_parse_rules(char *rulefile, struct sock_fprog *bpf, uint32_t link_type) { int ret; char buff[256]; struct sock_filter sf_single = { 0x06, 0, 0, 0xFFFFFFFF }; FILE *fp; if (rulefile == NULL) { bpf->len = 1; bpf->filter = xmalloc(sizeof(sf_single)); fmemcpy(&bpf->filter[0], &sf_single, sizeof(sf_single)); return; } fp = fopen(rulefile, "r"); if (!fp) { bpf_try_compile(rulefile, bpf, link_type); return; } fmemset(buff, 0, sizeof(buff)); while (fgets(buff, sizeof(buff), fp) != NULL) { buff[sizeof(buff) - 1] = 0; if (buff[0] != '{') { fmemset(buff, 0, sizeof(buff)); continue; } fmemset(&sf_single, 0, sizeof(sf_single)); ret = sscanf(buff, "{ 0x%x, %u, %u, 0x%08x },", (unsigned int *) &sf_single.code, (unsigned int *) &sf_single.jt, (unsigned int *) &sf_single.jf, (unsigned int *) &sf_single.k); if (unlikely(ret != 4)) panic("BPF syntax error!\n"); bpf->len++; bpf->filter = xrealloc(bpf->filter, 1, bpf->len * sizeof(sf_single)); fmemcpy(&bpf->filter[bpf->len - 1], &sf_single, sizeof(sf_single)); fmemset(buff, 0, sizeof(buff)); } fclose(fp); if (unlikely(__bpf_validate(bpf) == 0)) panic("This is not a valid BPF program!\n"); }
// GetModuleHandle // GetProcAddress // VirtualProtect // memcpy BOOL WINAPI IC_HookAPI(FARPROC pFunc, PROC pfNew, PBYTE pOrgBytes) { DWORD dwOldProtect = 0, dwAddress = 0; BYTE pBuf[5]; PBYTE pByte = NULL; t_fmemcpy fmemcpy = NULL; t_fVirtualProtect fVirtualProtect = NULL; t_fGetProcAddress fGetProcAddress = NULL; t_fGetModuleHandle fGetModuleHandle = NULL; DWORD *pFuncPtr = NULL; _asm MOV pFuncPtr, EAX pFuncPtr -= 2; fmemcpy = (t_fmemcpy)*pFuncPtr; pFuncPtr--; fVirtualProtect = (t_fVirtualProtect)*pFuncPtr; pFuncPtr--; fGetProcAddress = (t_fGetProcAddress)*pFuncPtr; pFuncPtr--; fGetModuleHandle = (t_fGetModuleHandle)*pFuncPtr; pByte = (PBYTE)pFunc; if( !fVirtualProtect((LPVOID)pFunc, 5, PAGE_EXECUTE_READWRITE, &dwOldProtect) ) { return FALSE; } pBuf[0] = 0xE9; fmemcpy(pOrgBytes, pFunc, 5); dwAddress = (DWORD)pfNew - (DWORD)pFunc - 5; fmemcpy(&pBuf[1], &dwAddress, 4); fmemcpy(pFunc, pBuf, 5); if( !fVirtualProtect((LPVOID)pFunc, 5, dwOldProtect, &dwOldProtect) ) { return FALSE; } return TRUE; }
/*
 * Append one pcap packet (raw record header + payload) to the mmap'ed
 * output window, remapping/growing the file first when the packet would
 * not fit.  Returns the number of bytes appended.
 */
static ssize_t pcap_mm_write(int fd, pcap_pkthdr_t *phdr, enum pcap_type type,
			     const uint8_t *packet, size_t len)
{
	size_t hdr_bytes = pcap_get_hdr_length(phdr, type);
	off_t used = (off_t) (ptr_va_curr - ptr_va_start);

	/* Grow the mapping if header + payload would overrun it */
	if (used + hdr_bytes + len > map_size)
		__pcap_mmap_write_need_remap(fd);

	fmemcpy(ptr_va_curr, &phdr->raw, hdr_bytes);
	ptr_va_curr += hdr_bytes;

	fmemcpy(ptr_va_curr, packet, len);
	ptr_va_curr += len;

	return hdr_bytes + len;
}
/*
 * Encrypt `size` bytes of plaintext with the session key in `proto` via
 * NaCl's crypto_box_afternm.  A fresh nonce is derived from the current
 * TAI timestamp; its changing part is embedded in the ciphertext pad so
 * the peer can decrypt, and the remaining pad bytes are randomized.
 * On success *chipertext points into curve->enc_buf (valid only until
 * the next call) and the plaintext size is returned; on failure a
 * negative errno is returned.  Serialized by curve->enc_lock.
 */
ssize_t curve25519_encode(struct curve25519_struct *curve, struct curve25519_proto *proto,
			  unsigned char *plaintext, size_t size, unsigned char **chipertext)
{
	int ret, i;
	ssize_t done = size;
	struct taia packet_taia;

	spinlock_lock(&curve->enc_lock);
	if (unlikely(size > curve->enc_buf_size)) {
		done = -ENOMEM;
		goto out;
	}

	/* Nonce = per-connection prefix + packed current TAI timestamp */
	taia_now(&packet_taia);
	taia_pack(proto->enonce + NONCE_OFFSET, &packet_taia);

	memset(curve->enc_buf, 0, curve->enc_buf_size);
	ret = crypto_box_afternm(curve->enc_buf, plaintext, size,
				 proto->enonce, proto->key);
	if (unlikely(ret)) {
		done = -EIO;
		goto out;
	}

	/* Ship the variable part of the nonce inside the pad area */
	fmemcpy(curve->enc_buf + crypto_box_boxzerobytes - NONCE_LENGTH,
		proto->enonce + NONCE_OFFSET, NONCE_LENGTH);
	/* Randomize the remaining pad bytes so they leak nothing */
	for (i = 0; i < crypto_box_boxzerobytes - NONCE_LENGTH; ++i)
		curve->enc_buf[i] = (uint8_t) secrand();

	(*chipertext) = curve->enc_buf;
out:
	spinlock_unlock(&curve->enc_lock);
	return done;
}
/*
 * Recompute every 16-bit checksum field registered for packet `csum_id`
 * and patch it into the packet payload.  Each csum16 descriptor gives
 * the byte range to sum (from/to), the checksum flavour, and the offset
 * where the result is stored.
 */
static void apply_csum16(int csum_id)
{
	int j, i = csum_id;
	size_t csum_max = packet_dyn[i].slen;

	for (j = 0; j < csum_max; ++j) {
		uint16_t sum = 0;
		struct csum16 *csum = &packet_dyn[i].csum[j];

		/* Zero the field first so it does not contribute to the sum */
		fmemset(&packets[i].payload[csum->off], 0, sizeof(sum));

		switch (csum->which) {
		case CSUM_IP:
			/* Clamp the summed range to the packet length */
			if (csum->to >= packets[i].len)
				csum->to = packets[i].len - 1;
			sum = calc_csum(packets[i].payload + csum->from,
					csum->to - csum->from + 1, 0);
			break;
		case CSUM_UDP:
			/* from = start of IP header, to = start of UDP header;
			 * p4_csum builds the pseudo-header checksum */
			sum = p4_csum((void *) packets[i].payload + csum->from,
				      packets[i].payload + csum->to,
				      (packets[i].len - csum->to), IPPROTO_UDP);
			break;
		case CSUM_TCP:
			sum = p4_csum((void *) packets[i].payload + csum->from,
				      packets[i].payload + csum->to,
				      (packets[i].len - csum->to), IPPROTO_TCP);
			break;
		}

		fmemcpy(&packets[i].payload[csum->off], &sum, sizeof(sum));
	}
}
/*
 * Duplicate the memory segment at `base` (used for fork): allocate a
 * best-fit hole of the same extent and copy the contents over.
 * Returns the new segment base, or NULL (0) when memory is exhausted.
 */
seg_t mm_dup(seg_t base)
{
    register struct malloc_hole *o, *m;
    size_t i;

    debug("MALLOC: mm_dup()\n");
    o = find_hole(&memmap, base);
    if (o->flags != HOLE_USED)
	panic("bad/swapped hole");
#ifdef CONFIG_SWAP
    /* Keep swapping segments out until a big enough hole frees up */
    while ((m = best_fit_hole(&memmap, o->extent)) == NULL) {
	seg_t s = swap_strategy(NULL);
	if (!s || swap_out(s) == -1)
	    return NULL;
    }
#else
    m = best_fit_hole(&memmap, o->extent);
    if (m == NULL)
	return NULL;
#endif
    split_hole(&memmap, m, o->extent);
    m->flags = HOLE_USED;
    m->refcount = 1;
    /* Extent is in paragraphs; << 4 converts to bytes for the copy.
     * NOTE(review): the byte count is truncated to __u16 below, so an
     * extent of 4K paragraphs (64K bytes) or more would copy short —
     * confirm the upper bound on segment size. */
    i = (o->extent << 4);
    fmemcpy(m->page_base, 0, o->page_base, 0, (__u16) i);
    return m->page_base;
}
/* Replace n->value with a freshly allocated shallow copy of *item,
 * releasing the previous value first. */
static inline void _header_item_dup(struct fmap_node *n, void *item)
{
	void *copy;

	_header_item_free(n->value);

	copy = fmalloc(sizeof(struct http_header_item));
	fmemcpy(copy, item, sizeof(struct http_header_item));
	n->value = copy;
}
/* called by handle func READ (int21/ah=3f) */
/*
 * Buffered console line input: on first call, read a whole line into
 * kb_buf via read_line(), then hand it out to the caller in chunks of up
 * to n bytes.  inputptr tracks the unread tail between calls; it resets
 * to NULL once the line (including the echoed LF) is fully consumed, or
 * immediately when the line starts with ^Z (EOF).
 * Returns the number of bytes copied to bp.
 */
size_t read_line_handle(int sft_idx, size_t n, char FAR * bp)
{
  size_t chars_left;

  if (inputptr == NULL)
  {
    /* can we reuse kb_buf or was it overwritten? */
    if (kb_buf.kb_size != LINEBUFSIZECON)
    {
      kb_buf.kb_count = 0;
      kb_buf.kb_size = LINEBUFSIZECON;
    }
    read_line(sft_idx, sft_idx, &kb_buf);
    /* Append (and echo) the terminating LF right after the CR */
    kb_buf.kb_buf[kb_buf.kb_count + 1] = echo_char(LF, sft_idx);
    inputptr = kb_buf.kb_buf;
    if (*inputptr == CTL_Z)
    {
      inputptr = NULL;	/* ^Z: signal EOF, hand out nothing */
      return 0;
    }
  }
  /* Unread bytes: the buffer holds kb_count chars plus CR and LF */
  chars_left = &kb_buf.kb_buf[kb_buf.kb_count + 2] - inputptr;

  if (n > chars_left)
    n = chars_left;
  fmemcpy(bp, inputptr, n);
  inputptr += n;
  if (n == chars_left)
    inputptr = NULL;	/* line fully consumed: next call reads anew */
  return n;
}
/* Relocate the Extended BIOS Data Area: copy `bytes` bytes from the
 * current EBDA segment to `new_seg`, then update the BDA's EBDA segment
 * pointer and the recorded top-of-RAM. */
void movebda(size_t bytes, unsigned new_seg)
{
	unsigned cur_seg = peek(0, EBDASEG);

	fmemcpy(MK_FP(new_seg, 0), MK_FP(cur_seg, 0), bytes);
	poke(0, EBDASEG, new_seg);
	poke(0, RAMSIZE, ram_top);
}
/*
 * FCB-based FindNext: rebuild the directory-match state (Dmatch) from
 * the FCB, temporarily swap the DTA to point at Dmatch, run
 * DosFindNext(), then copy the found directory entry back into the
 * caller's DTA and refresh the FCB bookkeeping fields.
 * Returns TRUE on a match, FALSE otherwise (with CritErrCode = 0x12);
 * the user's DTA is restored on every path.
 */
BOOL FcbFindNext(xfcb FAR * lpXfcb)
{
  BYTE FAR *lpDir;
  COUNT FcbDrive;
  psp FAR *lpPsp = MK_FP(cu_psp, 0);

  /* First, move the dta to a local and change it around to match */
  /* our functions. */
  lpDir = (BYTE FAR *) dta;
  dta = (BYTE FAR *) & Dmatch;

  /* Next initialze local variables by moving them from the fcb */
  lpFcb = CommonFcbInit(lpXfcb, SecPathName, &FcbDrive);

  /* Reconstrct the dirmatch structure from the fcb */
  Dmatch.dm_drive = lpFcb->fcb_sftno;

  fbcopy(lpFcb->fcb_fname, (BYTE FAR *) Dmatch.dm_name_pat,
         FNAME_SIZE + FEXT_SIZE);
  DosUpFMem((BYTE FAR *) Dmatch.dm_name_pat, FNAME_SIZE + FEXT_SIZE);

  Dmatch.dm_attr_srch = wAttr;
  Dmatch.dm_entry = lpFcb->fcb_strtclst;
  Dmatch.dm_dircluster = lpFcb->fcb_dirclst;

  /* Extended FCB: it carries its own search attribute and the DTA copy
     gets the 7-byte extended prefix */
  if ((xfcb FAR *) lpFcb != lpXfcb)
  {
    wAttr = lpXfcb->xfcb_attrib;
    fbcopy(lpXfcb, lpDir, 7);
    lpDir += 7;
  }
  else
    wAttr = D_ALL;

  if (DosFindNext() != SUCCESS)
  {
    /* Restore the user's DTA before failing */
    dta = lpPsp->ps_dta;
    CritErrCode = 0x12;
    return FALSE;
  }

  *lpDir++ = FcbDrive;
  fmemcpy((struct dirent FAR *)lpDir, &SearchDir, sizeof(struct dirent));

  /* Persist the search position back into the FCB for the next call */
  lpFcb->fcb_dirclst = (UWORD)Dmatch.dm_dircluster;
  lpFcb->fcb_strtclst = Dmatch.dm_entry;
  lpFcb->fcb_sftno = Dmatch.dm_drive;
#if 0
  lpFcb->fcb_cublock = Dmatch.dm_entry;
  lpFcb->fcb_cublock *= 0x100;
  lpFcb->fcb_cublock += wAttr;
#endif
  dta = lpPsp->ps_dta;
  return TRUE;
}
/*
 * Copy src into dst after verifying that the destination can hold it.
 * Returns SUCCESS when srclen fits within dstlen, DE_INVLDFUNC when the
 * destination buffer is too small (nothing is copied in that case).
 *
 * Efficiency note: this function is called as `return cpyBuf(...)` from
 * several places; the parameter order was chosen so a good optimizer can
 * share the push/call/return sequence between those call sites.
 */
STATIC COUNT cpyBuf(VOID FAR * dst, UWORD dstlen, VOID FAR * src, UWORD srclen)
{
  if (srclen > dstlen)
    return DE_INVLDFUNC;        /* buffer too small */
  fmemcpy(dst, src, srclen);
  return SUCCESS;
}
/*
 * Append one pcap packet (header + payload) to the file-backed mmap
 * window, growing the file and remapping when the window is full.
 * Growth factor is 10/8 (25%), page-aligned; the file is extended by
 * seeking past the new end and writing a single byte.  Serialized by
 * the global spinlock.  Returns the number of bytes appended; panics
 * on any I/O or mapping failure.
 */
static ssize_t pcap_mmap_write_pcap_pkt(int fd, struct pcap_pkthdr *hdr,
					uint8_t *packet, size_t len)
{
	int ret;
	off_t pos;

	spinlock_lock(&lock);
	if ((off_t) (pcurr - pstart) + sizeof(*hdr) + len > map_size) {
		off_t map_size_old = map_size;
		off_t offset = (pcurr - pstart);

		/* Grow the mapping by 25%, keeping it page-aligned */
		map_size = PAGE_ALIGN(map_size_old * 10 / 8);

		/* Extend the backing file to the new mapping size */
		pos = lseek(fd, map_size, SEEK_SET);
		if (pos < 0)
			panic("Cannot lseek pcap file!\n");

		ret = write_or_die(fd, "", 1);
		if (ret != 1)
			panic("Cannot write file!\n");

		pstart = mremap(pstart, map_size_old, map_size, MREMAP_MAYMOVE);
		if (pstart == MAP_FAILED)
			panic("mmap of file failed!");

		ret = madvise(pstart, map_size, MADV_SEQUENTIAL);
		if (ret < 0)
			panic("Failed to give kernel mmap advise!\n");

		/* The mapping may have moved: recompute the write cursor */
		pcurr = pstart + offset;
	}

	fmemcpy(pcurr, hdr, sizeof(*hdr));
	pcurr += sizeof(*hdr);
	fmemcpy(pcurr, packet, len);
	pcurr += len;

	spinlock_unlock(&lock);
	return sizeof(*hdr) + len;
}
/* this receives a name in 11 char field NAME+EXT
   and builds a zeroterminated string
   unfortunately, blanks are allowed in filenames. like "test e", " test .y z",...
   so we have to work from the last blank backward
*/
void ConvertName83ToNameSZ(BYTE FAR *destSZ, BYTE FAR *srcFCBName)
{
  int loop;
  int noExtension = FALSE;

  /* "." and ".." are stored with the dot already in the name field and
     must not get a dot-extension appended */
  if (*srcFCBName == '.')
  {
    noExtension = TRUE;
  }

  /* Copy the 8-char name field, then trim trailing blanks */
  fmemcpy(destSZ,srcFCBName,FNAME_SIZE);
  srcFCBName += FNAME_SIZE;

  for (loop = FNAME_SIZE; --loop >= 0; )
  {
    if (destSZ[loop] != ' ')
      break;
  }
  destSZ += loop + 1;	/* loop = index of last non-blank char (or -1) */

  if (!noExtension)	/* not for ".", ".." */
  {
    /* Find the last non-blank of the 3-char extension field */
    for (loop = FEXT_SIZE; --loop >= 0; )
    {
      if (srcFCBName[loop] != ' ')
        break;
    }
    if (loop >= 0)	/* only append '.' when there is an extension */
    {
      *destSZ++ = '.';
      fmemcpy(destSZ,srcFCBName,loop+1);
      destSZ += loop+1;
    }
  }
  *destSZ = '\0';
}
/* this receives a name in 11 char field NAME+EXT
   and builds a zeroterminated string
   unfortunately, blanks are allowed in filenames. like "test e", " test .y z",...
   so we have to work from the last blank backward
*/
void ConvertName83ToNameSZ(BYTE FAR * destSZ, BYTE FAR * srcFCBName)
{
  int loop;

  /* Copy the 8-char name field, then back up over trailing blanks */
  fmemcpy(destSZ, srcFCBName, FNAME_SIZE);
  for (loop = FNAME_SIZE; --loop >= 0 && destSZ[loop] == ' '; )
    ;
  destSZ += ++loop;	/* after ++, loop == number of kept name chars */

  /* "." and ".." keep the dot in the name field: skip the extension */
  if (*srcFCBName != '.')       /* not for ".", ".." */
  {
    srcFCBName += FNAME_SIZE;
    /* Back up over trailing blanks of the 3-char extension field */
    for (loop = FEXT_SIZE; --loop >= 0 && srcFCBName[loop] == ' '; )
      ;
    if (++loop > 0)	/* any extension chars left? */
    {
      *destSZ++ = '.';
      fmemcpy(destSZ, srcFCBName, loop);
      destSZ += loop;
    }
  }
  *destSZ = '\0';
}
/*
 * Read one pcap packet (record header + payload) from the mmap'ed
 * capture window.  Returns total bytes consumed, -EIO when the mapping
 * is exhausted, or -EINVAL on a bogus (zero or oversized) length.
 */
static ssize_t pcap_mm_read(int fd __maybe_unused, pcap_pkthdr_t *phdr,
			    enum pcap_type type, uint8_t *packet, size_t len)
{
	size_t hdrsize = pcap_get_hdr_length(phdr, type), hdrlen;

	/* Would the record header run past the end of the mapping? */
	if (unlikely((off_t) (ptr_va_curr + hdrsize - ptr_va_start) > (off_t) map_size))
		return -EIO;

	fmemcpy(&phdr->raw, ptr_va_curr, hdrsize);
	ptr_va_curr += hdrsize;

	hdrlen = pcap_get_length(phdr, type);
	/* BUGFIX: validate hdrlen before using it in pointer arithmetic;
	 * a corrupt file could previously drive 'ptr_va_curr + hdrlen'
	 * past the end of the address space (undefined behavior) before
	 * the sanity check ran. */
	if (unlikely(hdrlen == 0 || hdrlen > len))
		return -EINVAL;

	/* Would the payload run past the end of the mapping? */
	if (unlikely((off_t) (ptr_va_curr + hdrlen - ptr_va_start) > (off_t) map_size))
		return -EIO;

	fmemcpy(packet, ptr_va_curr, hdrlen);
	ptr_va_curr += hdrlen;

	return hdrsize + hdrlen;
}
/*
 * Buffer one pcap packet (record header + payload) into the next
 * scatter/gather iovec slot.  When all slots are filled the whole array
 * is flushed to fd in a single writev() and the cache restarts at slot
 * 0.  Returns the number of bytes buffered for this packet.
 * NOTE(review): assumes every iov_base was allocated large enough for
 * the largest header + packet (allocation happens elsewhere) — confirm
 * the slot buffer size covers `len` in all call paths.
 */
static ssize_t pcap_sg_write(int fd, pcap_pkthdr_t *phdr, enum pcap_type type,
			     const uint8_t *packet, size_t len)
{
	ssize_t ret, hdrsize = pcap_get_hdr_length(phdr, type);

	/* Cache full: flush all slots to disk with one writev() */
	if (unlikely(iov_slot == array_size(iov))) {
		ret = writev(fd, iov, array_size(iov));
		if (ret < 0)
			panic("Writev I/O error: %s!\n", strerror(errno));

		iov_slot = 0;
	}

	fmemcpy(iov[iov_slot].iov_base, &phdr->raw, hdrsize);
	iov[iov_slot].iov_len = hdrsize;

	fmemcpy(iov[iov_slot].iov_base + iov[iov_slot].iov_len, packet, len);
	ret = (iov[iov_slot].iov_len += len);	/* total bytes in this slot */
	iov_slot++;

	return ret;
}
/*
 * Add a header to `header_map`: look the key up in the catalogue of
 * known headers (defined_header_map); unknown keys are silently ignored.
 * The catalogue entry is shallow-copied and given its own heap copy of
 * `val`, then inserted into the map.
 */
static inline void _header_map_add(struct fmap *header_map, char *key, char *val)
{
	struct http_header_item *header_item = fmap_getvalue(defined_header_map, key);
	if (header_item == NULL)
		return;	/* not a recognized header: ignore */

	struct http_header_item *item_target = fmalloc(sizeof(struct http_header_item));
	fmemcpy(item_target, header_item, sizeof(struct http_header_item));

	size_t val_len = strlen(val);
	item_target->val_parse_func = header_item->val_parse_func;
	item_target->val = fmalloc(val_len + 1);
	/* FIX: reuse val_len instead of recomputing strlen(val) */
	cpystr(item_target->val, val, val_len);

	fmap_add(header_map, key, item_target);
}
/*
 * mode = LOAD or EXECUTE
 * ep = EXE block
 * lp = filename to load (string)
 *
 * leb = local copy of exe block
 */
/*
 * Load (and optionally execute) a program: read the first bytes of the
 * file and dispatch to the EXE or COM loader.  A file shorter than an
 * exe_header, or one without the MZ magic, is treated as a COM image.
 * On LOAD success the (possibly updated) exec block is copied back to
 * the caller.  Returns SUCCESS, a loader result code, or DE_FILENOTFND.
 */
COUNT DosExec(COUNT mode, exec_blk FAR * ep, BYTE FAR * lp)
{
  COUNT rc;
  exec_blk leb;
/*  BYTE FAR *cp;*/
  BOOL bIsCom = FALSE;

  /* Work on a local copy so the caller's block is untouched on failure */
  fmemcpy(&leb, ep, sizeof(exec_blk));
  /* If file not found - free ram and return error */
  if ((rc = DosOpen(lp, 0)) < 0)
  {
    return DE_FILENOTFND;
  }

  /* A short read means there is no full EXE header: must be a COM file */
  if (DosRead(rc, sizeof(exe_header), (VOID FAR *) & header, &UnusedRetVal)
      != sizeof(exe_header))
  {
    bIsCom = TRUE;
  }
  DosClose(rc);

  if (bIsCom || header.exSignature != MAGIC)
  {
    rc = DosComLoader(lp, &leb, mode);
  }
  else
  {
    rc = DosExeLoader(lp, &leb, mode);
  }
  /* LOAD only: hand the updated exec block back to the caller */
  if (mode == LOAD && rc == SUCCESS)
    fmemcpy(ep, &leb, sizeof(exec_blk));
  return rc;
}
/*
 * FCB-based FindFirst: parse the FCB, temporarily swap the DTA to point
 * at the local Dmatch structure, run DosFindFirst(), then copy the found
 * directory entry back into the caller's DTA and seed the FCB with the
 * search position for subsequent FcbFindNext() calls.
 * Returns TRUE on a match, FALSE otherwise; the user's DTA is restored
 * on every path.
 */
BOOL FcbFindFirst(xfcb FAR * lpXfcb)
{
  BYTE FAR *lpDir;
  COUNT FcbDrive;
  psp FAR *lpPsp = MK_FP(cu_psp, 0);

  /* First, move the dta to a local and change it around to match */
  /* our functions. */
  lpDir = (BYTE FAR *) dta;
  dta = (BYTE FAR *) & Dmatch;

  /* Next initialze local variables by moving them from the fcb */
  lpFcb = CommonFcbInit(lpXfcb, SecPathName, &FcbDrive);

  /* Extended FCB (flag 0xff): it carries its own search attribute and
     the DTA copy gets the 7-byte extended prefix */
  if (lpXfcb->xfcb_flag == 0xff)
  {
    wAttr = lpXfcb->xfcb_attrib;
    fbcopy(lpXfcb, lpDir, 7);
    lpDir += 7;
  }
  else
    wAttr = D_ALL;

  if (DosFindFirst(wAttr, SecPathName) != SUCCESS)
  {
    /* Restore the user's DTA before failing */
    dta = lpPsp->ps_dta;
    return FALSE;
  }

  *lpDir++ = FcbDrive;
  fmemcpy(lpDir, &SearchDir, sizeof(struct dirent));

  /* Seed the FCB with the search position for FcbFindNext() */
  lpFcb->fcb_dirclst = (UWORD)Dmatch.dm_dircluster;
  lpFcb->fcb_strtclst = Dmatch.dm_entry;

/*
  This is undocumented and seen using Pcwatch and Ramview.
  The First byte is the current directory count and the second seems
  to be the attribute byte.
 */
  lpFcb->fcb_sftno = Dmatch.dm_drive;   /* MSD seems to save this @ fcb_date. */
#if 0
  lpFcb->fcb_cublock = Dmatch.dm_entry;
  lpFcb->fcb_cublock *= 0x100;
  lpFcb->fcb_cublock += wAttr;
#endif
  dta = lpPsp->ps_dta;
  return TRUE;
}
/*
 * Resize a hole (grow only; shrinking is currently a no-op).
 * If the adjacent hole is free and large enough, grow in place by
 * stealing pages from it; a fully consumed neighbour is unlinked and
 * marked spare.  Otherwise (CONFIG_ADVANCED_MM) allocate a new hole of
 * the requested size, copy the contents over, free the old hole, and
 * return the new one.  Returns NULL when the resize cannot be done.
 */
struct malloc_hole *mm_resize(struct malloc_hole *m, segext_t pages)
{
	register struct malloc_hole *next;
	register segext_t ext;
	seg_t base;

	if (m->extent >= pages) {
		/* for now don't reduce holes */
		return m;
	}
	next = m->next;
	ext = pages - m->extent;
	if (next->flags == HOLE_FREE && next->extent >= ext) {
		/* Grow in place at the expense of the adjacent free hole */
		m->extent += ext;
		next->extent -= ext;
		next->page_base += ext;
		if (next->extent == 0) {
			/* BUGFIX: was 'next->flags == HOLE_SPARE;' — a
			 * no-op comparison; the consumed hole must
			 * actually be marked spare before unlinking. */
			next->flags = HOLE_SPARE;
			m->next = next->next;
		}
		return m;
	}
#ifdef CONFIG_ADVANCED_MM
	base = mm_alloc(pages);
	/* BUGFIX: check the fresh allocation ('base'), not the stale
	 * 'next' pointer, which is always non-NULL at this point. */
	if (!base) {
		return NULL;	/* Out of luck */
	}
	/* Copy the old contents (extent is in paragraphs, << 4 = bytes) */
	fmemcpy(base, 0, m->page_base, 0, (__u16) (m->extent << 4));
	next = find_hole(&memmap, base);
	next->refcount = m->refcount;
	m->flags = HOLE_FREE;
	sweep_holes(&memmap);
	return next;
#else
	return NULL;
#endif
}
// GetModuleHandle // GetProcAddress // VirtualProtect // memcpy BOOL WINAPI IC_UnhookAPI(FARPROC pFunc, PBYTE pOrgBytes) { DWORD dwOldProtect = 0; PBYTE pByte = NULL; HMODULE hMod = NULL; t_fmemcpy fmemcpy = NULL; t_fVirtualProtect fVirtualProtect = NULL; t_fGetProcAddress fGetProcAddress = NULL; t_fGetModuleHandle fGetModuleHandle = NULL; DWORD *pFuncPtr = NULL; _asm MOV pFuncPtr, EAX pFuncPtr -= 2; fmemcpy = (t_fmemcpy)*pFuncPtr; pFuncPtr--; fVirtualProtect = (t_fVirtualProtect)*pFuncPtr; pFuncPtr--; fGetProcAddress = (t_fGetProcAddress)*pFuncPtr; pFuncPtr--; fGetModuleHandle = (t_fGetModuleHandle)*pFuncPtr; pByte = (PBYTE)pFunc; if( !fVirtualProtect((LPVOID)pFunc, 5, PAGE_EXECUTE_READWRITE, &dwOldProtect) ) { return FALSE; } fmemcpy(pFunc, pOrgBytes, 5); if( !fVirtualProtect((LPVOID)pFunc, 5, dwOldProtect, &dwOldProtect) ) { return FALSE; } return TRUE; }
/*
 * Map an L2 (far-memory) buffer into one of the NR_MAPBUFS L1 slots so
 * the kernel can address its data directly.  If the buffer is already
 * mapped (or lives in kernel space), only the refcount is bumped.
 * Otherwise: take a free slot, or evict an unused mapped buffer
 * (round-robin starting after lastumap), or sleep on bufmapwait until
 * unmap_buffer releases one.  May sleep.
 */
void map_buffer(register struct buffer_head *bh)
{
    int i;

    /* If buffer is already mapped, just increase the refcount and return */
    debug2("mapping buffer %d (%d)\n", bh->b_num, bh->b_mapcount);
    if (bh->b_data || bh->b_seg != kernel_ds) {

#ifdef DEBUG
	if (!bh->b_mapcount) {
	    debug("BUFMAP: Buffer %d (block %d) `remapped' into L1.\n",
		  bh->b_num, bh->b_blocknr);
	}
#endif

	bh->b_mapcount++;
	return;
    }

    /* else keep trying till we succeed */
    for (;;) {
	/* First check for the trivial case */
	for (i = 0; i < NR_MAPBUFS; i++) {
	    if (!bufmem_map[i]) {
		/* We can just map here! */
		bufmem_map[i] = bh;
#ifdef DMA_ALN
		bh->b_data = bufmem_i + (i << BLOCK_SIZE_BITS);
#else
		bh->b_data = (char *)bufmem + (i << BLOCK_SIZE_BITS);
#endif
		bh->b_mapcount++;
		/* Pull the block's contents up from L2 if they are valid */
		if(bh->b_uptodate)
		    fmemcpy(kernel_ds, (__u16) bh->b_data, _buf_ds,
			    (__u16) (bh->b_num * BLOCK_SIZE), BLOCK_SIZE);
		debug3("BUFMAP: Buffer %d (block %d) mapped into L1 slot %d.\n",
		       bh->b_num, bh->b_blocknr, i);
		return;
	    }
	}

	/* Now, we check for a mapped buffer with no count and then
	 * hopefully find one to send back to L2 */
	for (i = (lastumap + 1) % NR_MAPBUFS; i != lastumap;
	     i = ((i + 1) % NR_MAPBUFS)) {
	    debug1("BUFMAP: trying slot %d\n", i);
	    if (!bufmem_map[i]->b_mapcount) {
		debug1("BUFMAP: Buffer %d unmapped from L1\n",
		       bufmem_map[i]->b_num);
		/* Now unmap it: flush the slot's contents back to L2,
		 * then retry the free-slot scan above */
		fmemcpy(_buf_ds, (__u16) (bufmem_map[i]->b_num * BLOCK_SIZE),
			kernel_ds, (__u16) bufmem_map[i]->b_data, BLOCK_SIZE);
		bufmem_map[i]->b_data = 0;
		bufmem_map[i] = 0;
		break;
	    }
	}

	/* The last case is to wait until unmap gets a b_mapcount down to 0 */
	if (i == lastumap) {
	    /* previous loop failed */
	    debug1("BUFMAP: buffer #%d waiting on L1 slot\n", bh->b_num);
	    sleep_on(&bufmapwait);
	    debug("BUFMAP: wait queue woken up...\n");
	} else {
	    /* success */
	    lastumap = i;
	}
    }
}
/*
 * Bridge mode: receive frames on ctx->device_in via an RX_RING and
 * retransmit them on ctx->device_out via a TX_RING, applying the
 * configured BPF filter, an optional packet-type filter, and dissector
 * printing along the way.  Runs until SIGINT or frame_count_max frames.
 */
static void receive_to_xmit(struct ctx *ctx)
{
	short ifflags = 0;
	uint8_t *in, *out;
	int rx_sock, ifindex_in, ifindex_out;
	unsigned int size_in, size_out, it_in = 0, it_out = 0;
	unsigned long frame_count = 0;
	struct frame_map *hdr_in, *hdr_out;
	struct ring tx_ring, rx_ring;
	struct pollfd rx_poll;
	struct sock_fprog bpf_ops;

	if (!strncmp(ctx->device_in, ctx->device_out, IFNAMSIZ))
		panic("Ingress/egress devices must be different!\n");
	if (!device_up_and_running(ctx->device_out))
		panic("Egress device not up and running!\n");

	rx_sock = pf_socket();
	tx_sock = pf_socket();

	fmemset(&tx_ring, 0, sizeof(tx_ring));
	fmemset(&rx_ring, 0, sizeof(rx_ring));
	fmemset(&rx_poll, 0, sizeof(rx_poll));
	fmemset(&bpf_ops, 0, sizeof(bpf_ops));

	ifindex_in = device_ifindex(ctx->device_in);
	ifindex_out = device_ifindex(ctx->device_out);

	size_in = ring_size(ctx->device_in, ctx->reserve_size);
	size_out = ring_size(ctx->device_out, ctx->reserve_size);

	enable_kernel_bpf_jit_compiler();

	/* Attach the BPF filter to the ingress socket */
	bpf_parse_rules(ctx->filter, &bpf_ops, ctx->link_type);
	if (ctx->dump_bpf)
		bpf_dump_all(&bpf_ops);
	bpf_attach_to_sock(rx_sock, &bpf_ops);

	/* RX ring on the ingress device */
	setup_rx_ring_layout(rx_sock, &rx_ring, size_in, ctx->jumbo, false);
	create_rx_ring(rx_sock, &rx_ring, ctx->verbose);
	mmap_rx_ring(rx_sock, &rx_ring);
	alloc_rx_ring_frames(rx_sock, &rx_ring);
	bind_rx_ring(rx_sock, &rx_ring, ifindex_in);
	prepare_polling(rx_sock, &rx_poll);

	/* TX ring on the egress device */
	set_packet_loss_discard(tx_sock);
	setup_tx_ring_layout(tx_sock, &tx_ring, size_out, ctx->jumbo);
	create_tx_ring(tx_sock, &tx_ring, ctx->verbose);
	mmap_tx_ring(tx_sock, &tx_ring);
	alloc_tx_ring_frames(tx_sock, &tx_ring);
	bind_tx_ring(tx_sock, &tx_ring, ifindex_out);

	dissector_init_all(ctx->print_mode);

	if (ctx->promiscuous)
		ifflags = enter_promiscuous_mode(ctx->device_in);

	if (ctx->kpull)
		interval = ctx->kpull;

	/* Periodic timer kicks the kernel-side TX ring flush */
	set_itimer_interval_value(&itimer, 0, interval);
	setitimer(ITIMER_REAL, &itimer, NULL);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	/* NOTE(review): this string literal arrived split across two
	 * physical lines in the dump; rejoined here on the assumption it
	 * was a single line originally. */
	printf("Running! Hang up with ^C!\n\n");
	fflush(stdout);

	while (likely(sigint == 0)) {
		/* Drain every frame the kernel has handed to user space */
		while (user_may_pull_from_rx(rx_ring.frames[it_in].iov_base)) {
			__label__ next;

			hdr_in = rx_ring.frames[it_in].iov_base;
			in = ((uint8_t *) hdr_in) + hdr_in->tp_h.tp_mac;
			frame_count++;

			/* Optional pkttype filter (host/broadcast/...) */
			if (ctx->packet_type != -1)
				if (ctx->packet_type != hdr_in->s_ll.sll_pkttype)
					goto next;

			hdr_out = tx_ring.frames[it_out].iov_base;
			out = ((uint8_t *) hdr_out) + TPACKET2_HDRLEN -
			      sizeof(struct sockaddr_ll);

			/* Find a TX slot the kernel has released */
			for (; !user_may_pull_from_tx(tx_ring.frames[it_out].iov_base) &&
			       likely(!sigint);) {
				if (ctx->randomize)
					next_rnd_slot(&it_out, &tx_ring);
				else {
					it_out++;
					if (it_out >= tx_ring.layout.tp_frame_nr)
						it_out = 0;
				}

				hdr_out = tx_ring.frames[it_out].iov_base;
				out = ((uint8_t *) hdr_out) + TPACKET2_HDRLEN -
				      sizeof(struct sockaddr_ll);
			}

			/* Clone the frame into the TX slot and hand it back */
			tpacket_hdr_clone(&hdr_out->tp_h, &hdr_in->tp_h);
			fmemcpy(out, in, hdr_in->tp_h.tp_len);

			kernel_may_pull_from_tx(&hdr_out->tp_h);
			if (ctx->randomize)
				next_rnd_slot(&it_out, &tx_ring);
			else {
				it_out++;
				if (it_out >= tx_ring.layout.tp_frame_nr)
					it_out = 0;
			}

			show_frame_hdr(hdr_in, ctx->print_mode);

			dissector_entry_point(in, hdr_in->tp_h.tp_snaplen,
					      ctx->link_type, ctx->print_mode);

			if (frame_count_max != 0) {
				if (frame_count >= frame_count_max) {
					sigint = 1;
					break;
				}
			}

next:
			/* Return the RX slot to the kernel and advance */
			kernel_may_pull_from_rx(&hdr_in->tp_h);
			it_in++;
			if (it_in >= rx_ring.layout.tp_frame_nr)
				it_in = 0;

			if (unlikely(sigint == 1))
				goto out;
		}

		/* Ring empty: block until the kernel signals new frames */
		poll(&rx_poll, 1, -1);
	}

out:
	timer_purge();

	sock_rx_net_stats(rx_sock, 0);

	bpf_release(&bpf_ops);
	dissector_cleanup_all();

	destroy_tx_ring(tx_sock, &tx_ring);
	destroy_rx_ring(rx_sock, &rx_ring);

	if (ctx->promiscuous)
		leave_promiscuous_mode(ctx->device_in, ifflags);

	close(tx_sock);
	close(rx_sock);
}
/*
 * Slow-path transmit: send each crafted packet with one sendto() per
 * packet on the PF_PACKET socket.  Applies per-packet counter /
 * randomizer / checksum dynamics, optionally probes the target with
 * ICMP (smoke test), and records per-CPU statistics.  `num` semantics:
 * ctx->num > 0 limits the count; ctx->num == 0 with orig_num > 0 means
 * this worker got no share and sends nothing (num stays 0).
 */
static void xmit_slowpath_or_die(struct ctx *ctx, int cpu, unsigned long orig_num)
{
	int ret, icmp_sock = -1;
	unsigned long num = 1, i = 0;
	struct timeval start, end, diff;
	unsigned long long tx_bytes = 0, tx_packets = 0;
	struct packet_dyn *pktd;
	struct sockaddr_ll saddr = {
		.sll_family = PF_PACKET,
		.sll_halen = ETH_ALEN,
		.sll_ifindex = device_ifindex(ctx->device),
	};

	if (ctx->num > 0)
		num = ctx->num;
	if (ctx->num == 0 && orig_num > 0)
		num = 0;

	if (ctx->smoke_test)
		icmp_sock = xmit_smoke_setup(ctx);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0) && likely(num > 0) && likely(plen > 0)) {
		pktd = &packet_dyn[i];
		if (pktd->clen + pktd->rlen + pktd->slen) {
			/* Packet has dynamic fields: refresh them */
			apply_counter(i);
			apply_randomizer(i);
			apply_csum16(i);
		}
retry:
		ret = sendto(sock, packets[i].payload, packets[i].len, 0,
			     (struct sockaddr *) &saddr, sizeof(saddr));
		if (unlikely(ret < 0)) {
			if (errno == ENOBUFS) {
				/* Device queue full: yield and retry */
				sched_yield();
				goto retry;
			}

			panic("Sendto error: %s!\n", strerror(errno));
		}

		tx_bytes += packets[i].len;
		tx_packets++;

		if (ctx->smoke_test) {
			ret = xmit_smoke_probe(icmp_sock, ctx);
			if (unlikely(ret < 0)) {
				printf("%sSmoke test alert:%s\n", colorize_start(bold), colorize_end());
				printf(" Remote host seems to be unresponsive to ICMP probes!\n");
				printf(" Last instance was packet%lu, seed:%u, trafgen snippet:\n\n",
				       i, seed);

				dump_trafgen_snippet(packets[i].payload, packets[i].len);
				break;
			}
		}

		/* Advance to the next packet: sequential or random */
		if (!ctx->rand) {
			i++;
			if (i >= plen)
				i = 0;
		} else
			i = rand() % plen;

		if (ctx->num > 0)
			num--;

		if (ctx->gap > 0)
			usleep(ctx->gap);
	}

	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	if (ctx->smoke_test)
		close(icmp_sock);

	stats[cpu].tx_packets = tx_packets;
	stats[cpu].tx_bytes = tx_bytes;
	stats[cpu].tv_sec = diff.tv_sec;
	stats[cpu].tv_usec = diff.tv_usec;

	stats[cpu].state |= CPU_STATS_STATE_RES;
}

/*
 * Fast-path transmit: map a TX_RING and copy crafted packets directly
 * into ring slots; the kernel drains the ring asynchronously (kicked by
 * the interval timer).  Same dynamic-field handling, `num` semantics,
 * and statistics as the slow path, but no per-packet syscall.
 */
static void xmit_fastpath_or_die(struct ctx *ctx, int cpu, unsigned long orig_num)
{
	int ifindex = device_ifindex(ctx->device);
	uint8_t *out = NULL;
	unsigned int it = 0;
	unsigned long num = 1, i = 0, size;
	struct ring tx_ring;
	struct frame_map *hdr;
	struct timeval start, end, diff;
	struct packet_dyn *pktd;
	unsigned long long tx_bytes = 0, tx_packets = 0;

	fmemset(&tx_ring, 0, sizeof(tx_ring));

	size = ring_size(ctx->device, ctx->reserve_size);

	set_sock_prio(sock, 512);
	set_packet_loss_discard(sock);

	setup_tx_ring_layout(sock, &tx_ring, size, ctx->jumbo_support);
	create_tx_ring(sock, &tx_ring, ctx->verbose);
	mmap_tx_ring(sock, &tx_ring);
	alloc_tx_ring_frames(sock, &tx_ring);
	bind_tx_ring(sock, &tx_ring, ifindex);

	drop_privileges(ctx->enforce, ctx->uid, ctx->gid);

	if (ctx->kpull)
		interval = ctx->kpull;

	if (ctx->num > 0)
		num = ctx->num;
	if (ctx->num == 0 && orig_num > 0)
		num = 0;

	/* Periodic timer kicks the kernel-side ring flush */
	set_itimer_interval_value(&itimer, 0, interval);
	setitimer(ITIMER_REAL, &itimer, NULL);

	bug_on(gettimeofday(&start, NULL));

	while (likely(sigint == 0) && likely(num > 0) && likely(plen > 0)) {
		/* Fill every slot the kernel has released back to us */
		while (user_may_pull_from_tx(tx_ring.frames[it].iov_base) &&
		       likely(num > 0)) {
			hdr = tx_ring.frames[it].iov_base;
			/* Payload starts after tpacket2 hdr + sockaddr_ll */
			out = ((uint8_t *) hdr) + TPACKET2_HDRLEN -
			      sizeof(struct sockaddr_ll);

			hdr->tp_h.tp_snaplen = packets[i].len;
			hdr->tp_h.tp_len = packets[i].len;

			pktd = &packet_dyn[i];
			if (pktd->clen + pktd->rlen + pktd->slen) {
				/* Packet has dynamic fields: refresh them */
				apply_counter(i);
				apply_randomizer(i);
				apply_csum16(i);
			}

			fmemcpy(out, packets[i].payload, packets[i].len);

			tx_bytes += packets[i].len;
			tx_packets++;

			/* Advance to the next packet: sequential or random */
			if (!ctx->rand) {
				i++;
				if (i >= plen)
					i = 0;
			} else
				i = rand() % plen;

			kernel_may_pull_from_tx(&hdr->tp_h);

			it++;
			if (it >= tx_ring.layout.tp_frame_nr)
				it = 0;

			if (ctx->num > 0)
				num--;

			if (unlikely(sigint == 1))
				break;
		}
	}

	bug_on(gettimeofday(&end, NULL));
	timersub(&end, &start, &diff);

	timer_purge();

	destroy_tx_ring(sock, &tx_ring);

	stats[cpu].tx_packets = tx_packets;
	stats[cpu].tx_bytes = tx_bytes;
	stats[cpu].tv_sec = diff.tv_sec;
	stats[cpu].tv_usec = diff.tv_usec;

	stats[cpu].state |= CPU_STATS_STATE_RES;
}
/*
 * Service block-device requests destined for a user-space ("meta")
 * driver: translate each kernel request into a ud_request, post it to
 * the driver task, sleep until the driver completes it, then finish the
 * kernel request.  Actual data transfer is currently disabled (#if 0)
 * because the copy would target the wrong task.
 */
static void do_meta_request(kdev_t device)
{
	struct ud_driver *driver;
	struct ud_request *udr;
	struct request *req;
	char *buff;
	int major = MAJOR(device);

	/* BUGFIX: 'major' was previously passed to get_driver() before it
	 * was initialized, and 'req' was dereferenced before ever being
	 * assigned when no driver was registered. */
	driver = get_driver(major);
	printk("do_meta_request %d %x\n", major, blk_dev[major].current_request);
	if (NULL == driver) {
		req = blk_dev[major].current_request;
		if (req)
			end_request(0, req->rq_dev);
		return;
	}
	printk("1");
	while (1) {
		req = blk_dev[major].current_request;
		printk("2");
		if (!req || req->rq_dev < 0 || req->rq_sector == -1)
			return;
		printk("5");

		/* Build a user-space request mirroring the kernel one */
		udr = new_request();
		udr->udr_type = UDR_BLK + req->rq_cmd;
		udr->udr_ptr = req->rq_sector;
		udr->udr_minor = MINOR(req->rq_dev);
		printk("6");
		post_request(driver, udr);
		printk("7");

		/* Should really check here whether we have a request */
		if (req->rq_cmd == WRITE) {
			/* Can't do this, copies to the wrong task */
#if 0
			verified_memcpy_tofs(driver->udd_data, buff, BLOCK_SIZE);
			/* FIXME FIXME */
			fmemcpy(driver->udd_task->mm.dseg, driver->udd_data,
				get_ds(), buff, 1024);
#endif
		}
		printk("8");

		/* Wake up the driver so it can deal with the request */
		wake_up(&driver->udd_wait);
		printk("request init: wake driver, sleeping\n");
		sleep_on(&udr->udr_wait);
		printk("request continue\n");

		/* REQUEST HAS BEEN RETURNED BY USER PROGRAM */
		/* request must be dealt with and ended */
		if (udr->udr_status == 1) {
			end_request(0, req->rq_dev);
			udr->udr_status = 0;
			continue;
		}
		udr->udr_status = 0;
		buff = req->rq_buffer;
		if (req->rq_cmd == READ) {
			/* Can't do this, copies from the wrong task */
#if 0
			verified_memcpy_fromfs(buff, driver->udd_data, BLOCK_SIZE);
			/* FIXME FIXME */
			fmemcpy(get_ds(), buff, driver->udd_task->mm.dseg,
				driver->udd_data, 1024);
#endif
		}
		end_request(1, req->rq_dev);
		wake_up(&udr->udr_wait);
	}
}