Example No. 1
 void MemoryMappedFile::_unlock() {
     if (!views.empty()) assert(mprotect(views[0], len, PROT_READ) == 0);
 }
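Note: if assert here is the standard <assert.h> macro, the mprotect call is compiled out when NDEBUG is defined. A side-effect-safe sketch of the same check (assuming standard assert semantics):

     int rc = mprotect(views[0], len, PROT_READ);
     assert(rc == 0);
     (void)rc; /* silence unused-variable warnings under NDEBUG */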
Example No. 2
static void
heap_space_protect(struct heap_space *heap)
{
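	/* A protection argument of 0 (no PROT_* bits, equivalent to PROT_NONE
	 * on typical systems) revokes all access to the heap region; the
	 * return value is ignored here. */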
	mprotect(heap->base, heap->limit - heap->base, 0);
}
Example No. 3
	bool commitVirtualPages(uint8* baseVirtualAddress,size_t numPages)
	{
		assert(isPageAligned(baseVirtualAddress));
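		// The page count is converted to a byte length by shifting by the
		// preferred page-size log2; on success the pages become read/write.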
		return mprotect(baseVirtualAddress,numPages << getPreferredVirtualPageSizeLog2(),PROT_READ | PROT_WRITE) == 0;
	}
Example No. 4
/*
 * Function that does the real stuff.
 */
bpf_filter_func
bpf_jit_compile(struct bpf_insn *prog, u_int nins, size_t *size)
{
	bpf_bin_stream stream;
	struct bpf_insn *ins;
	int flags, fret, fpkt, fmem, fjmp, flen;
	u_int i, pass;

	/*
	 * NOTE: Do not modify the name of this variable, as it's used by
	 * the macros to emit code.
	 */
	emit_func emitm;

	flags = bpf_jit_optimize(prog, nins);
	fret = (flags & BPF_JIT_FRET) != 0;
	fpkt = (flags & BPF_JIT_FPKT) != 0;
	fmem = (flags & BPF_JIT_FMEM) != 0;
	fjmp = (flags & BPF_JIT_FJMP) != 0;
	flen = (flags & BPF_JIT_FLEN) != 0;

	if (fret)
		nins = 1;

	memset(&stream, 0, sizeof(stream));

	/* Allocate the reference table for the jumps. */
	if (fjmp) {
#ifdef _KERNEL
		stream.refs = malloc((nins + 1) * sizeof(u_int), M_BPFJIT,
		    M_NOWAIT | M_ZERO);
#else
		stream.refs = calloc(nins + 1, sizeof(u_int));
#endif
		if (stream.refs == NULL)
			return (NULL);
	}

	/*
	 * The first pass will emit the lengths of the instructions
	 * to create the reference table.
	 */
	emitm = emit_length;

	for (pass = 0; pass < 2; pass++) {
		ins = prog;

		/* Create the procedure header. */
		if (fmem) {
			PUSH(RBP);
			MOVrq(RSP, RBP);
			SUBib(BPF_MEMWORDS * sizeof(uint32_t), RSP);
		}
		if (flen)
			MOVrd2(ESI, R9D);
		if (fpkt) {
			MOVrq2(RDI, R8);
			MOVrd(EDX, EDI);
		}

		for (i = 0; i < nins; i++) {
			stream.bpf_pc++;

			switch (ins->code) {
			default:
#ifdef _KERNEL
				return (NULL);
#else
				abort();
#endif

			case BPF_RET|BPF_K:
				MOVid(ins->k, EAX);
				if (fmem)
					LEAVE();
				RET();
				break;

			case BPF_RET|BPF_A:
				if (fmem)
					LEAVE();
				RET();
				break;

			case BPF_LD|BPF_W|BPF_ABS:
				MOVid(ins->k, ESI);
				CMPrd(EDI, ESI);
				JAb(12);
				MOVrd(EDI, ECX);
				SUBrd(ESI, ECX);
				CMPid(sizeof(int32_t), ECX);
				if (fmem) {
					JAEb(4);
					ZEROrd(EAX);
					LEAVE();
				} else {
					JAEb(3);
					ZEROrd(EAX);
				}
				RET();
				MOVrq3(R8, RCX);
				MOVobd(RCX, RSI, EAX);
				BSWAP(EAX);
				break;

			case BPF_LD|BPF_H|BPF_ABS:
				ZEROrd(EAX);
				MOVid(ins->k, ESI);
				CMPrd(EDI, ESI);
				JAb(12);
				MOVrd(EDI, ECX);
				SUBrd(ESI, ECX);
				CMPid(sizeof(int16_t), ECX);
				if (fmem) {
					JAEb(2);
					LEAVE();
				} else
					JAEb(1);
				RET();
				MOVrq3(R8, RCX);
				MOVobw(RCX, RSI, AX);
				SWAP_AX();
				break;

			case BPF_LD|BPF_B|BPF_ABS:
				ZEROrd(EAX);
				MOVid(ins->k, ESI);
				CMPrd(EDI, ESI);
				if (fmem) {
					JBb(2);
					LEAVE();
				} else
					JBb(1);
				RET();
				MOVrq3(R8, RCX);
				MOVobb(RCX, RSI, AL);
				break;

			case BPF_LD|BPF_W|BPF_LEN:
				MOVrd3(R9D, EAX);
				break;

			case BPF_LDX|BPF_W|BPF_LEN:
				MOVrd3(R9D, EDX);
				break;

			case BPF_LD|BPF_W|BPF_IND:
				CMPrd(EDI, EDX);
				JAb(27);
				MOVid(ins->k, ESI);
				MOVrd(EDI, ECX);
				SUBrd(EDX, ECX);
				CMPrd(ESI, ECX);
				JBb(14);
				ADDrd(EDX, ESI);
				MOVrd(EDI, ECX);
				SUBrd(ESI, ECX);
				CMPid(sizeof(int32_t), ECX);
				if (fmem) {
					JAEb(4);
					ZEROrd(EAX);
					LEAVE();
				} else {
					JAEb(3);
					ZEROrd(EAX);
				}
				RET();
				MOVrq3(R8, RCX);
				MOVobd(RCX, RSI, EAX);
				BSWAP(EAX);
				break;

			case BPF_LD|BPF_H|BPF_IND:
				ZEROrd(EAX);
				CMPrd(EDI, EDX);
				JAb(27);
				MOVid(ins->k, ESI);
				MOVrd(EDI, ECX);
				SUBrd(EDX, ECX);
				CMPrd(ESI, ECX);
				JBb(14);
				ADDrd(EDX, ESI);
				MOVrd(EDI, ECX);
				SUBrd(ESI, ECX);
				CMPid(sizeof(int16_t), ECX);
				if (fmem) {
					JAEb(2);
					LEAVE();
				} else
					JAEb(1);
				RET();
				MOVrq3(R8, RCX);
				MOVobw(RCX, RSI, AX);
				SWAP_AX();
				break;

			case BPF_LD|BPF_B|BPF_IND:
				ZEROrd(EAX);
				CMPrd(EDI, EDX);
				JAEb(13);
				MOVid(ins->k, ESI);
				MOVrd(EDI, ECX);
				SUBrd(EDX, ECX);
				CMPrd(ESI, ECX);
				if (fmem) {
					JAb(2);
					LEAVE();
				} else
					JAb(1);
				RET();
				MOVrq3(R8, RCX);
				ADDrd(EDX, ESI);
				MOVobb(RCX, RSI, AL);
				break;

			case BPF_LDX|BPF_MSH|BPF_B:
				MOVid(ins->k, ESI);
				CMPrd(EDI, ESI);
				if (fmem) {
					JBb(4);
					ZEROrd(EAX);
					LEAVE();
				} else {
					JBb(3);
					ZEROrd(EAX);
				}
				RET();
				ZEROrd(EDX);
				MOVrq3(R8, RCX);
				MOVobb(RCX, RSI, DL);
				ANDib(0x0f, DL);
				SHLib(2, EDX);
				break;

			case BPF_LD|BPF_IMM:
				MOVid(ins->k, EAX);
				break;

			case BPF_LDX|BPF_IMM:
				MOVid(ins->k, EDX);
				break;

			case BPF_LD|BPF_MEM:
				MOVid(ins->k * sizeof(uint32_t), ESI);
				MOVobd(RSP, RSI, EAX);
				break;

			case BPF_LDX|BPF_MEM:
				MOVid(ins->k * sizeof(uint32_t), ESI);
				MOVobd(RSP, RSI, EDX);
				break;

			case BPF_ST:
				/*
				 * XXX this command and the following could
				 * be optimized if the previous instruction
				 * was already of this type
				 */
				MOVid(ins->k * sizeof(uint32_t), ESI);
				MOVomd(EAX, RSP, RSI);
				break;

			case BPF_STX:
				MOVid(ins->k * sizeof(uint32_t), ESI);
				MOVomd(EDX, RSP, RSI);
				break;

			case BPF_JMP|BPF_JA:
				JUMP(ins->k);
				break;

			case BPF_JMP|BPF_JGT|BPF_K:
				if (ins->jt == ins->jf) {
					JUMP(ins->jt);
					break;
				}
				CMPid(ins->k, EAX);
				JCC(JA, JBE);
				break;

			case BPF_JMP|BPF_JGE|BPF_K:
				if (ins->jt == ins->jf) {
					JUMP(ins->jt);
					break;
				}
				CMPid(ins->k, EAX);
				JCC(JAE, JB);
				break;

			case BPF_JMP|BPF_JEQ|BPF_K:
				if (ins->jt == ins->jf) {
					JUMP(ins->jt);
					break;
				}
				CMPid(ins->k, EAX);
				JCC(JE, JNE);
				break;

			case BPF_JMP|BPF_JSET|BPF_K:
				if (ins->jt == ins->jf) {
					JUMP(ins->jt);
					break;
				}
				TESTid(ins->k, EAX);
				JCC(JNE, JE);
				break;

			case BPF_JMP|BPF_JGT|BPF_X:
				if (ins->jt == ins->jf) {
					JUMP(ins->jt);
					break;
				}
				CMPrd(EDX, EAX);
				JCC(JA, JBE);
				break;

			case BPF_JMP|BPF_JGE|BPF_X:
				if (ins->jt == ins->jf) {
					JUMP(ins->jt);
					break;
				}
				CMPrd(EDX, EAX);
				JCC(JAE, JB);
				break;

			case BPF_JMP|BPF_JEQ|BPF_X:
				if (ins->jt == ins->jf) {
					JUMP(ins->jt);
					break;
				}
				CMPrd(EDX, EAX);
				JCC(JE, JNE);
				break;

			case BPF_JMP|BPF_JSET|BPF_X:
				if (ins->jt == ins->jf) {
					JUMP(ins->jt);
					break;
				}
				TESTrd(EDX, EAX);
				JCC(JNE, JE);
				break;

			case BPF_ALU|BPF_ADD|BPF_X:
				ADDrd(EDX, EAX);
				break;

			case BPF_ALU|BPF_SUB|BPF_X:
				SUBrd(EDX, EAX);
				break;

			case BPF_ALU|BPF_MUL|BPF_X:
				MOVrd(EDX, ECX);
				MULrd(EDX);
				MOVrd(ECX, EDX);
				break;

			case BPF_ALU|BPF_DIV|BPF_X:
				TESTrd(EDX, EDX);
				if (fmem) {
					JNEb(4);
					ZEROrd(EAX);
					LEAVE();
				} else {
					JNEb(3);
					ZEROrd(EAX);
				}
				RET();
				MOVrd(EDX, ECX);
				ZEROrd(EDX);
				DIVrd(ECX);
				MOVrd(ECX, EDX);
				break;

			case BPF_ALU|BPF_AND|BPF_X:
				ANDrd(EDX, EAX);
				break;

			case BPF_ALU|BPF_OR|BPF_X:
				ORrd(EDX, EAX);
				break;

			case BPF_ALU|BPF_LSH|BPF_X:
				MOVrd(EDX, ECX);
				SHL_CLrb(EAX);
				break;

			case BPF_ALU|BPF_RSH|BPF_X:
				MOVrd(EDX, ECX);
				SHR_CLrb(EAX);
				break;

			case BPF_ALU|BPF_ADD|BPF_K:
				ADD_EAXi(ins->k);
				break;

			case BPF_ALU|BPF_SUB|BPF_K:
				SUB_EAXi(ins->k);
				break;

			case BPF_ALU|BPF_MUL|BPF_K:
				MOVrd(EDX, ECX);
				MOVid(ins->k, EDX);
				MULrd(EDX);
				MOVrd(ECX, EDX);
				break;

			case BPF_ALU|BPF_DIV|BPF_K:
				MOVrd(EDX, ECX);
				ZEROrd(EDX);
				MOVid(ins->k, ESI);
				DIVrd(ESI);
				MOVrd(ECX, EDX);
				break;

			case BPF_ALU|BPF_AND|BPF_K:
				ANDid(ins->k, EAX);
				break;

			case BPF_ALU|BPF_OR|BPF_K:
				ORid(ins->k, EAX);
				break;

			case BPF_ALU|BPF_LSH|BPF_K:
				SHLib((ins->k) & 0xff, EAX);
				break;

			case BPF_ALU|BPF_RSH|BPF_K:
				SHRib((ins->k) & 0xff, EAX);
				break;

			case BPF_ALU|BPF_NEG:
				NEGd(EAX);
				break;

			case BPF_MISC|BPF_TAX:
				MOVrd(EAX, EDX);
				break;

			case BPF_MISC|BPF_TXA:
				MOVrd(EDX, EAX);
				break;
			}
			ins++;
		}

		if (pass > 0)
			continue;

		*size = stream.cur_ip;
#ifdef _KERNEL
		stream.ibuf = malloc(*size, M_BPFJIT, M_NOWAIT);
		if (stream.ibuf == NULL)
			break;
#else
		stream.ibuf = mmap(NULL, *size, PROT_READ | PROT_WRITE,
		    MAP_ANON, -1, 0);
		if (stream.ibuf == MAP_FAILED) {
			stream.ibuf = NULL;
			break;
		}
#endif

		/*
		 * Modify the reference table to contain the offsets and
		 * not the lengths of the instructions.
		 */
		if (fjmp)
			for (i = 1; i < nins + 1; i++)
				stream.refs[i] += stream.refs[i - 1];

		/* Reset the counters. */
		stream.cur_ip = 0;
		stream.bpf_pc = 0;

		/* The second pass creates the actual code. */
		emitm = emit_code;
	}

	/*
	 * The reference table is needed only during compilation;
	 * it can be freed now.
	 */
	if (fjmp)
#ifdef _KERNEL
		free(stream.refs, M_BPFJIT);
#else
		free(stream.refs);
#endif

#ifndef _KERNEL
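	/*
	 * Userland finalization: flip the generated code from writable to
	 * read + execute (W^X) before handing it out as a function pointer;
	 * if that fails, release the mapping and return NULL.
	 */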
	if (stream.ibuf != NULL &&
	    mprotect(stream.ibuf, *size, PROT_READ | PROT_EXEC) != 0) {
		munmap(stream.ibuf, *size);
		stream.ibuf = NULL;
	}
#endif

	return ((bpf_filter_func)stream.ibuf);
}
Example No. 5
/**
 * This function could be called from dsm_daemon thread and the main thread
 */
int dsm_getpage_internal(dhandle chunk_id, dhandle page_offset,
    uint8_t *requestor_host, uint32_t requestor_port, /*these are needed for updating the page map*/
    uint8_t **data, uint64_t *count, uint32_t flags) {
  UNUSED(chunk_id);
  UNUSED(flags);

  int error = 0;
  dsm_conf *c = &g_dsm->c;
  dsm_chunk_meta *chunk_meta = &g_dsm->g_dsm_page_map[chunk_id];
  dsm_page_meta *page_meta = &chunk_meta->pages[page_offset];
  
  log("Acquiring mutex lock, chunk_id: %"PRIu64", %"PRIu64"\n", chunk_id, page_offset);
  pthread_mutex_lock(&page_meta->lock);
  
  char *base_ptr = chunk_meta->g_base_ptr;
  if (!g_dsm->is_master) {
    error = dsm_getpage_internal_nonmaster(chunk_meta, page_offset, data, count, flags);
    goto cleanup_unlock;
  }
  
  int owner_idx = 0;
  if ((error=dsm_locatepage_internal(chunk_id, page_offset, 
                                     &owner_idx, flags)) < 0) {
    print_err("Could not locate owner for %"PRIu64", %"PRIu64".\n", chunk_id, page_offset);
    goto cleanup_unlock;
  }
  log("Located owner for the page %d\n", owner_idx);
   
  int requestor_idx = get_request_idx(g_dsm, requestor_host, requestor_port);
  *count = PAGESIZE; 
  // check if the owner host is this machine -
  // if so, serve the page; otherwise fetch it from the owner and serve it
  if (owner_idx == c->this_node_idx || page_meta->nodes_reading[c->this_node_idx]) {
    char *page_start_addr = base_ptr + page_offset*PAGESIZE;
    memcpy(*data, page_start_addr, PAGESIZE);
  } else {
    // this machine is not the owner of the page
    // get the page from the owner
    if (owner_idx != requestor_idx) {
      dsm_request *owner = &g_dsm->clients[owner_idx];
      char *page_start_addr = base_ptr + page_offset*PAGESIZE;
      dsm_request_getpage(owner, chunk_id, 
        page_offset, g_dsm->host, g_dsm->port, data, flags);

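      // Temporarily make the local page writable, install the fetched
      // contents, then drop back to PROT_READ so later writes fault again.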
      if ((error=mprotect(page_start_addr, PAGESIZE, PROT_WRITE|PROT_READ)) == -1) {
        print_err("mprotect failed for addr=%p, error=%s\n", page_start_addr, strerror(errno));
        goto cleanup_unlock;
      }
      memcpy(page_start_addr, *data, PAGESIZE);
      page_meta->page_prot = PROT_READ;
      page_meta->nodes_reading[c->this_node_idx] = 1;
      if ((error=mprotect(page_start_addr, PAGESIZE, PROT_READ)) == -1) {
        print_err("mprotect failed for addr=%p, error=%s\n", page_start_addr, strerror(errno));
        goto cleanup_unlock;
      }
    }
  }

  if (flags & FLAG_PAGE_WRITE) {
    // Invalidate page for all hosts but the new owner
    for (int i = 0; i < c->num_nodes; i++) {
      // Skip the requesting host/port
      if (i == requestor_idx)
        continue;

      // for master locally invalidate page
      if (i == c->master_idx) {
        char *page_start_addr = base_ptr + page_offset*PAGESIZE;
        if (page_meta->page_prot == PROT_WRITE) {
          if ((error=mprotect(page_start_addr, PAGESIZE, PROT_NONE)) == -1) {
            print_err("mprotect failed for addr=%p, error=%s\n", page_start_addr, strerror(errno));
            goto cleanup_unlock;
          }
          page_meta->page_prot = PROT_NONE;
          page_meta->nodes_reading[c->this_node_idx] = 0;
        }
        continue;
      }

      // send invalidate to only those clients which are using this chunk
      if (chunk_meta->clients_using[i]) {
        log("Sending invalidatepage for chunk_id=%"PRIu64", page_offset=%"PRIu64", host:port=%s:%d.\n", 
            chunk_id, page_offset, c->hosts[i], c->ports[i]);
        dsm_request_invalidatepage(&g_dsm->clients[i], chunk_id, 
                                 page_offset, c->hosts[i], c->ports[i], flags);
      }
    }
    // finally update the page map
    page_meta->owner_idx = requestor_idx;
  }

cleanup_unlock:
  pthread_mutex_unlock(&page_meta->lock);
  log("Released lock, chunk_id: %"PRIu64", %"PRIu64"\n", chunk_id, page_offset);
  return error;
}
Example No. 6
void CodeCache::unprotect() {
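  // Open the whole code cache as read/write/execute in a single call;
  // the return value of mprotect() is not checked here.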
  mprotect(m_base, m_codeSize, PROT_READ | PROT_WRITE | PROT_EXEC);
}
Example No. 7
/*
 * pmempool_info_file -- print info about single file
 */
static int
pmempool_info_file(struct pmem_info *pip, const char *file_name)
{
	int ret = 0;

	pip->file_name = file_name;

	/*
	 * If force flag is set 'types' fields _must_ hold
	 * single pool type - this is validated when processing
	 * command line arguments.
	 */
	if (pip->args.force) {
		pip->type = pip->args.type;
	} else {
		if (pmem_pool_parse_params(file_name, &pip->params, 1)) {
			if (errno)
				perror(file_name);
			else
				outv_err("%s: cannot determine type of pool\n",
					file_name);
			return -1;
		}

		pip->type = pip->params.type;
	}

	if (PMEM_POOL_TYPE_UNKNOWN == pip->type) {
		outv_err("%s: unknown pool type -- '%s'\n", file_name,
				pip->params.signature);
		return -1;
	} else if (!pip->args.force && !pip->params.is_checksum_ok) {
		outv_err("%s: invalid checksum\n", file_name);
		return -1;
	} else {
		if (util_options_verify(pip->opts, pip->type))
			return -1;

		pip->pfile = pool_set_file_open(file_name, 0, !pip->args.force);
		if (!pip->pfile) {
			perror(file_name);
			return -1;
		}

		if (pip->type != PMEM_POOL_TYPE_BTT) {
			struct pool_set *ps = pip->pfile->poolset;
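			/* Map each locally-mapped replica read-only so the
			 * info code cannot accidentally modify pool data. */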
			for (unsigned r = 0; r < ps->nreplicas; ++r) {
				if (ps->replica[r]->remote == NULL &&
					mprotect(ps->replica[r]->part[0].addr,
					ps->replica[r]->repsize,
					PROT_READ) < 0) {
					outv_err(
					"%s: failed to change pool protection",
					pip->pfile->fname);

					ret = -1;
					goto out_close;
				}
			}
		}

		if (pip->args.obj.replica) {
			size_t nreplicas = pool_set_file_nreplicas(pip->pfile);
			if (nreplicas == 1) {
				outv_err("only master replica available");
				ret = -1;
				goto out_close;
			}

			if (pip->args.obj.replica >= nreplicas) {
				outv_err("replica number out of range"
					" (valid range is: 0-%" PRIu64 ")",
					nreplicas - 1);
				ret = -1;
				goto out_close;
			}

			if (pool_set_file_set_replica(pip->pfile,
				pip->args.obj.replica)) {
				outv_err("setting replica number failed");
				ret = -1;
				goto out_close;
			}
		}

		/* hdr info is not present in btt device */
		if (pip->type != PMEM_POOL_TYPE_BTT) {
			if (pip->params.is_poolset &&
					pmempool_info_poolset(pip,
							VERBOSE_DEFAULT)) {
				ret = -1;
				goto out_close;
			}
			if (!pip->params.is_poolset &&
					pmempool_info_part(pip, UNDEF_REPLICA,
						UNDEF_PART, VERBOSE_DEFAULT)) {
				ret = -1;
				goto out_close;
			}
			if (pmempool_info_pool_hdr(pip, VERBOSE_DEFAULT)) {
				ret = -1;
				goto out_close;
			}
		}

		if (pip->params.is_part) {
			ret = 0;
			goto out_close;
		}

		switch (pip->type) {
		case PMEM_POOL_TYPE_LOG:
			ret = pmempool_info_log(pip);
			break;
		case PMEM_POOL_TYPE_BLK:
			ret = pmempool_info_blk(pip);
			break;
		case PMEM_POOL_TYPE_OBJ:
			ret = pmempool_info_obj(pip);
			break;
		case PMEM_POOL_TYPE_BTT:
			ret = pmempool_info_btt(pip);
			break;
		case PMEM_POOL_TYPE_UNKNOWN:
		default:
			ret = -1;
			break;
		}
out_close:
		pool_set_file_close(pip->pfile);
	}

	return ret;
}
Example No. 8
/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
	struct pdinfo	*pi, *spi;
	struct pginfo	**pd;
	u_long		idx, pidx, lidx;
	caddr_t		result, tail;
	u_long		index, lindex;
	void 		*pdregion = NULL;
	size_t		dirs, cnt;

	pages <<= malloc_pageshift;
	result = MMAP(pages + malloc_guard);
	if (result == MAP_FAILED) {
#ifdef MALLOC_EXTRA_SANITY
		wrtwarning("(ES): map_pages fails");
#endif /* MALLOC_EXTRA_SANITY */
		errno = ENOMEM;
		return (NULL);
	}
	index = ptr2index(result);
	tail = result + pages + malloc_guard;
	lindex = ptr2index(tail) - 1;
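	/* If a guard is configured, make the region just past the
	 * allocation inaccessible so overruns fault immediately. */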
	if (malloc_guard)
		mprotect(result + pages, malloc_guard, PROT_NONE);

	pidx = PI_IDX(index);
	lidx = PI_IDX(lindex);

	if (tail > malloc_brk) {
		malloc_brk = tail;
		last_index = lindex;
	}

	dirs = lidx - pidx;

	/* Insert directory pages, if needed. */
	if (pdir_lookup(index, &pi) != 0)
		dirs++;
	if (dirs > 0) {
		pdregion = MMAP(malloc_pagesize * dirs);
		if (pdregion == MAP_FAILED) {
			munmap(result, tail - result);
#ifdef MALLOC_EXTRA_SANITY
		wrtwarning("(ES): map_pages fails");
#endif
			errno = ENOMEM;
			return (NULL);
		}
	}
	cnt = 0;
	for (idx = pidx, spi = pi; idx <= lidx; idx++) {
		if (pi == NULL || PD_IDX(pi->dirnum) != idx) {
			pd = (struct pginfo **)((char *)pdregion +
			    cnt * malloc_pagesize);
			cnt++;
			memset(pd, 0, malloc_pagesize);
			pi = (struct pdinfo *) ((caddr_t) pd + pdi_off);
			pi->base = pd;
			pi->prev = spi;
			pi->next = spi->next;
			pi->dirnum = idx * (malloc_pagesize /
			    sizeof(struct pginfo *));

			if (spi->next != NULL)
				spi->next->prev = pi;
			spi->next = pi;
		}
		if (idx > pidx && idx < lidx) {
			pi->dirnum += pdi_mod;
		} else if (idx == pidx) {
			if (pidx == lidx) {
				pi->dirnum += (u_long)(tail - result) >>
				    malloc_pageshift;
			} else {
				pi->dirnum += pdi_mod - PI_OFF(index);
			}
		} else {
Example No. 9
int
main(int argc, char *argv[])
{
	char cwd[PATH_MAX + 1];
	char *p;
	size_t clen;
	long pg;
	int i;

	initprogname(argc > 0 ? argv[0] : "mktemp_test");

	pg = sysconf(_SC_PAGESIZE);
	if (getcwd(cwd, sizeof cwd - 1) == NULL)
		sudo_fatal("getcwd");
	clen = strlen(cwd);
	cwd[clen++] = '/';
	cwd[clen] = '\0';
#ifdef MAP_ANON
	p = mmap(NULL, pg * 3, PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
#else
	i = open("/dev/zero", O_RDWR);
	if (i == -1)
		sudo_fatal("/dev/zero");
	p = mmap(NULL, pg * 3, PROT_READ | PROT_WRITE, MAP_PRIVATE, i, 0);
#endif
	if (p == MAP_FAILED)
		sudo_fatal("mmap");
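	/* Fence the middle page with PROT_NONE pages on both sides so any
	 * out-of-bounds access by the tested functions faults immediately. */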
	if (mprotect(p, pg, PROT_NONE) || mprotect(p + pg * 2, pg, PROT_NONE))
		sudo_fatal("mprotect");
	p += pg;

	i = MAX_TEMPLATE_LEN + 1;
	while (i-- > 0) {
		/* try first at the start of a page, no prefix */
		try_mkdtemp(p, "", i);
		/* now at the end of the page, no prefix */
		try_mkdtemp(p + pg - i - 1, "", i);
		/* start of the page, prefixed with the cwd */
		try_mkdtemp(p, cwd, i);
		/* how about at the end of the page, prefixed with cwd? */
		try_mkdtemp(p + pg - clen - i - 1, cwd, i);

		/* again, with mkstemps() and an empty suffix */
		/* try first at the start of a page, no prefix */
		try_mkstemps(p, "", i, "");
		/* now at the end of the page, no prefix */
		try_mkstemps(p + pg - i - 1, "", i, "");
		/* start of the page, prefixed with the cwd */
		try_mkstemps(p, cwd, i, "");
		/* how about at the end of the page, prefixed with cwd? */
		try_mkstemps(p + pg - clen - i - 1, cwd, i, "");

		/* mkstemps() and a non-empty suffix */
		/* try first at the start of a page, no prefix */
		try_mkstemps(p, "", i, SUFFIX);
		/* now at the end of the page, no prefix */
		try_mkstemps(p + pg - i - SLEN - 1, "", i, SUFFIX);
		/* start of the page, prefixed with the cwd */
		try_mkstemps(p, cwd, i, SUFFIX);
		/* how about at the end of the page, prefixed with cwd? */
		try_mkstemps(p + pg - clen - i - SLEN - 1, cwd, i, SUFFIX);
	}

	return 0;
}
Example No. 10
static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment)
{
	if (alignment == 0)
		alignment = 1;

	if (alignment > B_PAGE_SIZE) {
		panic("alignment of %" B_PRIuSIZE " not supported", alignment);
		return NULL;
	}

	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (pagesNeeded * B_PAGE_SIZE >= GUARDED_HEAP_AREA_USE_THRESHOLD) {
		// Don't bother, use an area directly. Since it will also fault once
		// it is deleted, that fits our model quite nicely.

		pagesNeeded = (size + sizeof(guarded_heap_page) + B_PAGE_SIZE - 1)
			/ B_PAGE_SIZE;

		void* address = NULL;
		area_id area = create_area("guarded_heap_huge_allocation", &address,
			B_ANY_ADDRESS, (pagesNeeded + 1) * B_PAGE_SIZE, B_NO_LOCK,
			B_READ_AREA | B_WRITE_AREA);
		if (area < 0) {
			panic("failed to create area for allocation of %" B_PRIuSIZE
				" pages", pagesNeeded);
			return NULL;
		}

		// We just use a page object
		guarded_heap_page* page = (guarded_heap_page*)address;
		page->flags = GUARDED_HEAP_PAGE_FLAG_USED
			| GUARDED_HEAP_PAGE_FLAG_FIRST | GUARDED_HEAP_PAGE_FLAG_AREA;
		page->allocation_size = size;
		page->allocation_base = (void*)(((addr_t)address
			+ pagesNeeded * B_PAGE_SIZE - size) & ~(alignment - 1));
		page->alignment = alignment;
		page->thread = find_thread(NULL);

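		// Revoke all access (protection 0) to the page just past the
		// allocation, turning it into a guard page that faults on overflow.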
		mprotect((void*)((addr_t)address + pagesNeeded * B_PAGE_SIZE),
			B_PAGE_SIZE, 0);

		return page->allocation_base;
	}

	void* result = NULL;

	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, pagesNeeded, size,
			alignment);
		if (result != NULL)
			break;
	}

	uint32 counter = heap.area_creation_counter;
	areaListReadLocker.Unlock();

	if (result == NULL) {
		guarded_heap_add_area(heap, counter);
		return guarded_heap_allocate(heap, size, alignment);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}
Example No. 11
BOOL MemProtect(void * addr, size_t size, MEM_PROTECTION protect)
{
    return (-1 != mprotect(addr, size, SysProtection(protect)));
}
Example No. 12
/**
 * Map a memory region for a device using the Linux sysfs interface.
 *
 * \param dev   Device whose memory region is to be mapped.
 * \param map   Parameters of the mapping that is to be created.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range, pci_device_linux_sysfs_unmap_range
 *
 * \todo
 * Some older 2.6.x kernels don't implement the resourceN files.  On those
 * systems /dev/mem must be used.  On these systems it is also possible that
 * \c mmap64 may need to be used.
 */
static int
pci_device_linux_sysfs_map_range(struct pci_device *dev,
                                 struct pci_device_mapping *map)
{
    char name[256];
    int fd;
    int err = 0;
    const int prot = ((map->flags & PCI_DEV_MAP_FLAG_WRITABLE) != 0)
        ? (PROT_READ | PROT_WRITE) : PROT_READ;
    const int open_flags = ((map->flags & PCI_DEV_MAP_FLAG_WRITABLE) != 0)
        ? O_RDWR : O_RDONLY;
    const off_t offset = map->base - dev->regions[map->region].base_addr;
#ifdef HAVE_MTRR
    struct mtrr_sentry sentry = {
	.base = map->base,
        .size = map->size,
	.type = MTRR_TYPE_UNCACHABLE
    };
#endif

    /* For WC mappings, try sysfs resourceN_wc file first */
    if ((map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE) &&
	!pci_device_linux_sysfs_map_range_wc(dev, map))
	    return 0;

    snprintf(name, 255, "%s/%04x:%02x:%02x.%1u/resource%u",
             SYS_BUS_PCI,
             dev->domain,
             dev->bus,
             dev->dev,
             dev->func,
             map->region);

    fd = open(name, open_flags | O_CLOEXEC);
    if (fd == -1) {
        return errno;
    }


    map->memory = mmap(NULL, map->size, prot, MAP_SHARED, fd, offset);
    if (map->memory == MAP_FAILED) {
        map->memory = NULL;
	close(fd);
	return errno;
    }

#ifdef HAVE_MTRR
    if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) != 0) {
        sentry.type = MTRR_TYPE_WRBACK;
    } else if ((map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE) != 0) {
        sentry.type = MTRR_TYPE_WRCOMB;
    }

    if (pci_sys->mtrr_fd != -1 && sentry.type != MTRR_TYPE_UNCACHABLE) {
	if (ioctl(pci_sys->mtrr_fd, MTRRIOC_ADD_ENTRY, &sentry) < 0) {
	    /* FIXME: Should we report an error in this case?
	     */
	    fprintf(stderr, "error setting MTRR "
		    "(base = 0x%08lx, size = 0x%08x, type = %u) %s (%d)\n",
		    sentry.base, sentry.size, sentry.type,
		    strerror(errno), errno);
/*            err = errno;*/
	}
	/* KLUDGE ALERT -- rewrite the PTEs to turn off the CD and WT bits */
	mprotect (map->memory, map->size, PROT_NONE);
	err = mprotect (map->memory, map->size, PROT_READ|PROT_WRITE);

	if (err != 0) {
	    fprintf(stderr, "mprotect(PROT_READ | PROT_WRITE) failed: %s\n",
		    strerror(errno));
	    fprintf(stderr, "remapping without mprotect performance kludge.\n");

	    munmap(map->memory, map->size);
	    map->memory = mmap(NULL, map->size, prot, MAP_SHARED, fd, offset);
	    if (map->memory == MAP_FAILED) {
		map->memory = NULL;
		close(fd);
		return errno;
	    }
	}
    }
#endif

    close(fd);

    return 0;
}

/**
 * Unmap a memory region for a device using the Linux sysfs interface.
 *
 * \param dev   Device whose memory region is to be unmapped.
 * \param map   Parameters of the mapping that is to be destroyed.
 *
 * \return
 * Zero on success or an \c errno value on failure.
 *
 * \sa pci_device_map_range, pci_device_linux_sysfs_map_range
 *
 * \todo
 * Some older 2.6.x kernels don't implement the resourceN files.  On those
 * systems /dev/mem must be used.  On these systems it is also possible that
 * \c mmap64 may need to be used.
 */
static int
pci_device_linux_sysfs_unmap_range(struct pci_device *dev,
				   struct pci_device_mapping *map)
{
    int err = 0;
#ifdef HAVE_MTRR
    struct mtrr_sentry sentry = {
	.base = map->base,
        .size = map->size,
	.type = MTRR_TYPE_UNCACHABLE
    };
#endif

    err = pci_device_generic_unmap_range (dev, map);
    if (err)
	return err;

#ifdef HAVE_MTRR
    if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) != 0) {
        sentry.type = MTRR_TYPE_WRBACK;
    } else if ((map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE) != 0) {
        sentry.type = MTRR_TYPE_WRCOMB;
    }

    if (pci_sys->mtrr_fd != -1 && sentry.type != MTRR_TYPE_UNCACHABLE) {
	if (ioctl(pci_sys->mtrr_fd, MTRRIOC_DEL_ENTRY, &sentry) < 0) {
	    /* FIXME: Should we report an error in this case?
	     */
	    fprintf(stderr, "error setting MTRR "
		    "(base = 0x%08lx, size = 0x%08x, type = %u) %s (%d)\n",
		    sentry.base, sentry.size, sentry.type,
		    strerror(errno), errno);
/*            err = errno;*/
	}
    }
#endif

    return err;
}

static void pci_device_linux_sysfs_enable(struct pci_device *dev)
{
    char name[256];
    int fd;

    snprintf( name, 255, "%s/%04x:%02x:%02x.%1u/enable",
	      SYS_BUS_PCI,
	      dev->domain,
	      dev->bus,
	      dev->dev,
	      dev->func );

    fd = open( name, O_RDWR | O_CLOEXEC);
    if (fd == -1)
       return;

    write( fd, "1", 1 );
    close(fd);
}
Example No. 13
struct stack *
_rthread_alloc_stack(pthread_t thread)
{
	struct stack *stack;
	u_int32_t rnd;
	caddr_t base;
	caddr_t guard;
	size_t size;
	size_t guardsize;

	/* if the request uses the defaults, try to reuse one */
	if (thread->attr.stack_addr == NULL &&
	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
	    thread->attr.guard_size == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		stack = SLIST_FIRST(&def_stacks);
		if (stack != NULL) {
			SLIST_REMOVE_HEAD(&def_stacks, link);
			_spinunlock(&def_stacks_lock);
			return (stack);
		}
		_spinunlock(&def_stacks_lock);
	}

	/* allocate the stack struct that we'll return */
	stack = malloc(sizeof(*stack));
	if (stack == NULL)
		return (NULL);

	/* Smaller the stack, smaller the random bias */
	if (thread->attr.stack_size > _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize - 1);
	else if (thread->attr.stack_size == _thread_pagesize)
		rnd = arc4random() & (_thread_pagesize / 16 - 1);
	else
		rnd = 0;
	rnd &= ~_STACKALIGNBYTES;

	/* If a stack address was provided, just fill in the details */
	if (thread->attr.stack_addr != NULL) {
		stack->base = base = thread->attr.stack_addr;
		stack->len  = thread->attr.stack_size;
#ifdef MACHINE_STACK_GROWS_UP
		stack->sp = base + rnd;
#else
		stack->sp = base + thread->attr.stack_size - rnd;
#endif
		/*
		 * This impossible guardsize marks this stack as
		 * application allocated so it won't be freed or
		 * cached by _rthread_free_stack()
		 */
		stack->guardsize = 1;
		return (stack);
	}

	/* round up the requested sizes up to full pages */
	size = ROUND_TO_PAGE(thread->attr.stack_size);
	guardsize = ROUND_TO_PAGE(thread->attr.guard_size);

	/* check for overflow */
	if (size < thread->attr.stack_size ||
	    guardsize < thread->attr.guard_size ||
	    SIZE_MAX - size < guardsize) {
		free(stack);
		errno = EINVAL;
		return (NULL);
	}
	size += guardsize;

	/* actually allocate the real stack */
	base = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (base == MAP_FAILED) {
		free(stack);
		return (NULL);
	}

#ifdef MACHINE_STACK_GROWS_UP
	guard = base + size - guardsize;
	stack->sp = base + rnd;
#else
	guard = base;
	stack->sp = base + size - rnd;
#endif

	/* memory protect the guard region */
	if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
		munmap(base, size);
		free(stack);
		return (NULL);
	}

	stack->base = base;
	stack->guardsize = guardsize;
	stack->len = size;
	return (stack);
}
Example No. 14
DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			(void)dispatch_assume_zero(kr);
			DISPATCH_CLIENT_CRASH("Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	// Realign if needed; find the slop at top/bottom to unmap
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
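	// Debug builds keep the slop mapped but inaccessible (PROT_NONE) so
	// stray references fault; release builds simply unmap it.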
#if DISPATCH_DEBUG
	// Double-check our math.
	dispatch_assert(aligned_region % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH

	if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
		// If we lost the race to link in the new region, unmap the whole thing.
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}
Example No. 15
/*
=================
VM_Compile
=================
*/
void VM_Compile( vm_t *vm, vmHeader_t *header ) {
	int		op;
	int		maxLength;
	int		v;
	int		i;
	qboolean opt;

	// allocate a very large temp buffer, we will shrink it later
	maxLength = header->codeLength * 8;
	buf = (unsigned char *)Z_Malloc( maxLength, TAG_VM, qtrue  );
	jused = (unsigned char *)Z_Malloc(header->instructionCount + 2, TAG_VM, qtrue  );
	
	Com_Memset(jused, 0, header->instructionCount+2);

	for(pass=0;pass<2;pass++) {
	oc0 = -23423;
	oc1 = -234354;
	pop0 = -43435;
	pop1 = -545455;

	// translate all instructions
	pc = 0;
	instruction = 0;
	code = (byte *)header + header->codeOffset;
	compiledOfs = 0;

	LastCommand = LAST_COMMAND_NONE;

	while ( instruction < header->instructionCount ) {
		if ( compiledOfs > maxLength - 16 ) {
			Com_Error( ERR_FATAL, "VM_CompileX86: maxLength exceeded" );
		}

		vm->instructionPointers[ instruction ] = compiledOfs;
		instruction++;

		if ( pc > header->codeLength ) {
			Com_Error( ERR_FATAL, "VM_CompileX86: pc > header->codeLength" );
		}

		op = code[ pc ];
		pc++;
		switch ( op ) {
		case 0:
			break;
		case OP_BREAK:
			EmitString( "CC" );			// int 3
			break;
		case OP_ENTER:
			EmitString( "81 EE" );		// sub	esi, 0x12345678
			Emit4( Constant4() );
			break;
		case OP_CONST:
			if (code[pc+4] == OP_LOAD4) {
				EmitAddEDI4(vm);
				EmitString( "BB" );		// mov	ebx, 0x12345678
				Emit4( (Constant4()&vm->dataMask) + (int)vm->dataBase);
				EmitString( "8B 03" );		// mov	eax, dword ptr [ebx]
				EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
				pc++;						// OP_LOAD4
				instruction += 1;
				break;
			}
			if (code[pc+4] == OP_LOAD2) {
				EmitAddEDI4(vm);
				EmitString( "BB" );		// mov	ebx, 0x12345678
				Emit4( (Constant4()&vm->dataMask) + (int)vm->dataBase);
				EmitString( "0F B7 03" );	// movzx	eax, word ptr [ebx]
				EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
				pc++;						// OP_LOAD2
				instruction += 1;
				break;
			}
			if (code[pc+4] == OP_LOAD1) {
				EmitAddEDI4(vm);
				EmitString( "BB" );		// mov	ebx, 0x12345678
				Emit4( (Constant4()&vm->dataMask) + (int)vm->dataBase);
				EmitString( "0F B6 03" );	// movzx	eax, byte ptr [ebx]
				EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
				pc++;						// OP_LOAD1
				instruction += 1;
				break;
			}
			if (code[pc+4] == OP_STORE4) {
				opt = EmitMovEBXEDI(vm, (vm->dataMask & ~3));
				EmitString( "B8" );			// mov	eax, 0x12345678
				Emit4( Constant4() );
//				if (!opt) {
//					EmitString( "81 E3" );		// and ebx, 0x12345678
//					Emit4( vm->dataMask & ~3 );
//				}
				EmitString( "89 83" );		// mov dword ptr [ebx+0x12345678], eax
				Emit4( (int)vm->dataBase );
				EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
				pc++;						// OP_STORE4
				instruction += 1;
				break;
			}
			if (code[pc+4] == OP_STORE2) {
				opt = EmitMovEBXEDI(vm, (vm->dataMask & ~1));
				EmitString( "B8" );			// mov	eax, 0x12345678
				Emit4( Constant4() );
//				if (!opt) {
//					EmitString( "81 E3" );		// and ebx, 0x12345678
//					Emit4( vm->dataMask & ~1 );
//				}
				EmitString( "66 89 83" );	// mov word ptr [ebx+0x12345678], eax
				Emit4( (int)vm->dataBase );
				EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
				pc++;						// OP_STORE2
				instruction += 1;
				break;
			}
			if (code[pc+4] == OP_STORE1) {
				opt = EmitMovEBXEDI(vm, vm->dataMask);
				EmitString( "B8" );			// mov	eax, 0x12345678
				Emit4( Constant4() );
//				if (!opt) {
//					EmitString( "81 E3" );	// and ebx, 0x12345678
//					Emit4( vm->dataMask );
//				}
				EmitString( "88 83" );		// mov byte ptr [ebx+0x12345678], eax
				Emit4( (int)vm->dataBase );
				EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
				pc++;						// OP_STORE1
				instruction += 1;
				break;
			}
			if (code[pc+4] == OP_ADD) {
				EmitString( "81 07" );		// add dword ptr [edi], 0x1234567
				Emit4( Constant4() );
				pc++;						// OP_ADD
				instruction += 1;
				break;
			}
			if (code[pc+4] == OP_SUB) {
				EmitString( "81 2F" );		// sub dword ptr [edi], 0x1234567
				Emit4( Constant4() );
				pc++;						// OP_SUB
				instruction += 1;
				break;
			}
			EmitAddEDI4(vm);
			EmitString( "C7 07" );		// mov	dword ptr [edi], 0x12345678
			lastConst = Constant4();
			Emit4( lastConst );
			if (code[pc] == OP_JUMP) {
				jused[lastConst] = 1;
			}
			break;
		case OP_LOCAL:
			EmitAddEDI4(vm);
			EmitString( "8D 86" );		// lea eax, [0x12345678 + esi]
			oc0 = oc1;
			oc1 = Constant4();
			Emit4( oc1 );
			EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
			break;
		case OP_ARG:
			EmitMovEAXEDI(vm);			// mov	eax,dword ptr [edi]
			EmitString( "89 86" );		// mov	dword ptr [esi+database],eax
			// FIXME: range check
			Emit4( Constant1() + (int)vm->dataBase );
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_CALL:
			EmitString( "C7 86" );		// mov dword ptr [esi+database],0x12345678
			Emit4( (int)vm->dataBase );
			Emit4( pc );
			EmitString( "FF 15" );		// call asmCallPtr
			Emit4( (int)&asmCallPtr );
			break;
		case OP_PUSH:
			EmitAddEDI4(vm);
			break;
		case OP_POP:
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_LEAVE:
			v = Constant4();
			EmitString( "81 C6" );		// add	esi, 0x12345678
			Emit4( v );
			EmitString( "C3" );			// ret
			break;
		case OP_LOAD4:
			if (code[pc] == OP_CONST && code[pc+5] == OP_ADD && code[pc+6] == OP_STORE4) {
				if (oc0 == oc1 && pop0 == OP_LOCAL && pop1 == OP_LOCAL) {
					compiledOfs -= 11;
					vm->instructionPointers[ instruction-1 ] = compiledOfs;
				}
				pc++;						// OP_CONST
				v = Constant4();
				EmitMovEBXEDI(vm, vm->dataMask);
				if (v == 1 && oc0 == oc1 && pop0 == OP_LOCAL && pop1 == OP_LOCAL) {
					EmitString( "FF 83");		// inc dword ptr [ebx + 0x12345678]
					Emit4( (int)vm->dataBase );
				} else {
					EmitString( "8B 83" );		// mov	eax, dword ptr [ebx + 0x12345678]
					Emit4( (int)vm->dataBase );
					EmitString( "05"  );		// add eax, const
					Emit4( v );
					if (oc0 == oc1 && pop0 == OP_LOCAL && pop1 == OP_LOCAL) {
						EmitString( "89 83" );		// mov dword ptr [ebx+0x12345678], eax
						Emit4( (int)vm->dataBase );
					} else {
						EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
						EmitString( "8B 1F" );		// mov	ebx, dword ptr [edi]
						EmitString( "89 83" );		// mov dword ptr [ebx+0x12345678], eax
						Emit4( (int)vm->dataBase );
					}
				}
				EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
				pc++;						// OP_ADD
				pc++;						// OP_STORE
				instruction += 3;
				break;
			}

			if (code[pc] == OP_CONST && code[pc+5] == OP_SUB && code[pc+6] == OP_STORE4) {
				if (oc0 == oc1 && pop0 == OP_LOCAL && pop1 == OP_LOCAL) {
					compiledOfs -= 11;
					vm->instructionPointers[ instruction-1 ] = compiledOfs;
				}
				EmitMovEBXEDI(vm, vm->dataMask);
				EmitString( "8B 83" );		// mov	eax, dword ptr [ebx + 0x12345678]
				Emit4( (int)vm->dataBase );
				pc++;						// OP_CONST
				v = Constant4();
				if (v == 1 && oc0 == oc1 && pop0 == OP_LOCAL && pop1 == OP_LOCAL) {
					EmitString( "FF 8B");		// dec dword ptr [ebx + 0x12345678]
					Emit4( (int)vm->dataBase );
				} else {
					EmitString( "2D"  );		// sub eax, const
					Emit4( v );
					if (oc0 == oc1 && pop0 == OP_LOCAL && pop1 == OP_LOCAL) {
						EmitString( "89 83" );		// mov dword ptr [ebx+0x12345678], eax
						Emit4( (int)vm->dataBase );
					} else {
						EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
						EmitString( "8B 1F" );		// mov	ebx, dword ptr [edi]
						EmitString( "89 83" );		// mov dword ptr [ebx+0x12345678], eax
						Emit4( (int)vm->dataBase );
					}
				}
				EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
				pc++;						// OP_SUB
				pc++;						// OP_STORE
				instruction += 3;
				break;
			}

			if (buf[compiledOfs-2] == 0x89 && buf[compiledOfs-1] == 0x07) {
				compiledOfs -= 2;
				vm->instructionPointers[ instruction-1 ] = compiledOfs;
				EmitString( "8B 80");	// mov eax, dword ptr [eax + 0x1234567]
				Emit4( (int)vm->dataBase );
				EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
				break;
			}
			EmitMovEBXEDI(vm, vm->dataMask);
			EmitString( "8B 83" );		// mov	eax, dword ptr [ebx + 0x12345678]
			Emit4( (int)vm->dataBase );
			EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
			break;
		case OP_LOAD2:
			EmitMovEBXEDI(vm, vm->dataMask);
			EmitString( "0F B7 83" );	// movzx	eax, word ptr [ebx + 0x12345678]
			Emit4( (int)vm->dataBase );
			EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
			break;
		case OP_LOAD1:
			EmitMovEBXEDI(vm, vm->dataMask);
			EmitString( "0F B6 83" );	// movzx eax, byte ptr [ebx + 0x12345678]
			Emit4( (int)vm->dataBase );
			EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
			break;
		case OP_STORE4:
			EmitMovEAXEDI(vm);	
			EmitString( "8B 5F FC" );	// mov	ebx, dword ptr [edi-4]
//			if (pop1 != OP_CALL) {
//				EmitString( "81 E3" );		// and ebx, 0x12345678
//				Emit4( vm->dataMask & ~3 );
//			}
			EmitString( "89 83" );		// mov dword ptr [ebx+0x12345678], eax
			Emit4( (int)vm->dataBase );
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			break;
		case OP_STORE2:
			EmitMovEAXEDI(vm);	
			EmitString( "8B 5F FC" );	// mov	ebx, dword ptr [edi-4]
//			EmitString( "81 E3" );		// and ebx, 0x12345678
//			Emit4( vm->dataMask & ~1 );
			EmitString( "66 89 83" );	// mov word ptr [ebx+0x12345678], eax
			Emit4( (int)vm->dataBase );
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			break;
		case OP_STORE1:
			EmitMovEAXEDI(vm);	
			EmitString( "8B 5F FC" );	// mov	ebx, dword ptr [edi-4]
//			EmitString( "81 E3" );		// and ebx, 0x12345678
//			Emit4( vm->dataMask );
			EmitString( "88 83" );		// mov byte ptr [ebx+0x12345678], eax
			Emit4( (int)vm->dataBase );
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			break;

		case OP_EQ:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "75 06" );		// jne +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_NE:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "74 06" );		// je +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_LTI:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "7D 06" );		// jnl +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_LEI:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "7F 06" );		// jnle +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_GTI:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "7E 06" );		// jng +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_GEI:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "7C 06" );		// jnge +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_LTU:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "73 06" );		// jnb +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_LEU:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "77 06" );		// jnbe +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_GTU:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "76 06" );		// jna +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_GEU:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "8B 47 04" );	// mov	eax, dword ptr [edi+4]
			EmitString( "3B 47 08" );	// cmp	eax, dword ptr [edi+8]
			EmitString( "72 06" );		// jnae +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;
		case OP_EQF:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "D9 47 04" );	// fld dword ptr [edi+4]
			EmitString( "D8 5F 08" );	// fcomp dword ptr [edi+8]
			EmitString( "DF E0" );		// fnstsw ax
			EmitString( "F6 C4 40" );	// test	ah,0x40
			EmitString( "74 06" );		// je +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;			
		case OP_NEF:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "D9 47 04" );	// fld dword ptr [edi+4]
			EmitString( "D8 5F 08" );	// fcomp dword ptr [edi+8]
			EmitString( "DF E0" );		// fnstsw ax
			EmitString( "F6 C4 40" );	// test	ah,0x40
			EmitString( "75 06" );		// jne +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;			
		case OP_LTF:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "D9 47 04" );	// fld dword ptr [edi+4]
			EmitString( "D8 5F 08" );	// fcomp dword ptr [edi+8]
			EmitString( "DF E0" );		// fnstsw ax
			EmitString( "F6 C4 01" );	// test	ah,0x01
			EmitString( "74 06" );		// je +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;			
		case OP_LEF:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "D9 47 04" );	// fld dword ptr [edi+4]
			EmitString( "D8 5F 08" );	// fcomp dword ptr [edi+8]
			EmitString( "DF E0" );		// fnstsw ax
			EmitString( "F6 C4 41" );	// test	ah,0x41
			EmitString( "74 06" );		// je +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;			
		case OP_GTF:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "D9 47 04" );	// fld dword ptr [edi+4]
			EmitString( "D8 5F 08" );	// fcomp dword ptr [edi+8]
			EmitString( "DF E0" );		// fnstsw ax
			EmitString( "F6 C4 41" );	// test	ah,0x41
			EmitString( "75 06" );		// jne +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;			
		case OP_GEF:
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			EmitString( "D9 47 04" );	// fld dword ptr [edi+4]
			EmitString( "D8 5F 08" );	// fcomp dword ptr [edi+8]
			EmitString( "DF E0" );		// fnstsw ax
			EmitString( "F6 C4 01" );	// test	ah,0x01
			EmitString( "75 06" );		// jne +6
			EmitString( "FF 25" );		// jmp	[0x12345678]
			v = Constant4();
			jused[v] = 1;
			Emit4( (int)vm->instructionPointers + v*4 );
			break;			
		case OP_NEGI:
			EmitString( "F7 1F" );		// neg dword ptr [edi]
			break;
		case OP_ADD:
			EmitMovEAXEDI(vm);			// mov eax, dword ptr [edi]
			EmitString( "01 47 FC" );	// add dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_SUB:
			EmitMovEAXEDI(vm);			// mov eax, dword ptr [edi]
			EmitString( "29 47 FC" );	// sub dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_DIVI:
			EmitString( "8B 47 FC" );	// mov eax,dword ptr [edi-4]
			EmitString( "99" );			// cdq
			EmitString( "F7 3F" );		// idiv dword ptr [edi]
			EmitString( "89 47 FC" );	// mov dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_DIVU:
			EmitString( "8B 47 FC" );	// mov eax,dword ptr [edi-4]
			EmitString( "33 D2" );		// xor edx, edx
			EmitString( "F7 37" );		// div dword ptr [edi]
			EmitString( "89 47 FC" );	// mov dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_MODI:
			EmitString( "8B 47 FC" );	// mov eax,dword ptr [edi-4]
			EmitString( "99" );			// cdq
			EmitString( "F7 3F" );		// idiv dword ptr [edi]
			EmitString( "89 57 FC" );	// mov dword ptr [edi-4],edx
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_MODU:
			EmitString( "8B 47 FC" );	// mov eax,dword ptr [edi-4]
			EmitString( "33 D2" );		// xor edx, edx
			EmitString( "F7 37" );		// div dword ptr [edi]
			EmitString( "89 57 FC" );	// mov dword ptr [edi-4],edx
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_MULI:
			EmitString( "8B 47 FC" );	// mov eax,dword ptr [edi-4]
			EmitString( "F7 2F" );		// imul dword ptr [edi]
			EmitString( "89 47 FC" );	// mov dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_MULU:
			EmitString( "8B 47 FC" );	// mov eax,dword ptr [edi-4]
			EmitString( "F7 27" );		// mul dword ptr [edi]
			EmitString( "89 47 FC" );	// mov dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_BAND:
			EmitMovEAXEDI(vm);			// mov eax, dword ptr [edi]
			EmitString( "21 47 FC" );	// and dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_BOR:
			EmitMovEAXEDI(vm);			// mov eax, dword ptr [edi]
			EmitString( "09 47 FC" );	// or dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_BXOR:
			EmitMovEAXEDI(vm);			// mov eax, dword ptr [edi]
			EmitString( "31 47 FC" );	// xor dword ptr [edi-4],eax
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_BCOM:
			EmitString( "F7 17" );		// not dword ptr [edi]
			break;
		case OP_LSH:
			EmitString( "8B 0F" );		// mov ecx, dword ptr [edi]
			EmitString( "D3 67 FC" );	// shl dword ptr [edi-4], cl
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_RSHI:
			EmitString( "8B 0F" );		// mov ecx, dword ptr [edi]
			EmitString( "D3 7F FC" );	// sar dword ptr [edi-4], cl
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_RSHU:
			EmitString( "8B 0F" );		// mov ecx, dword ptr [edi]
			EmitString( "D3 6F FC" );	// shr dword ptr [edi-4], cl
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_NEGF:
			EmitString( "D9 07" );		// fld dword ptr [edi]
			EmitString( "D9 E0" );		// fchs
			EmitString( "D9 1F" );		// fstp dword ptr [edi]
			break;
		case OP_ADDF:
			EmitString( "D9 47 FC" );	// fld dword ptr [edi-4]
			EmitString( "D8 07" );		// fadd dword ptr [edi]
			EmitString( "D9 5F FC" );	// fstp dword ptr [edi-4]
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			break;
		case OP_SUBF:
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			EmitString( "D9 07" );		// fld dword ptr [edi]
			EmitString( "D8 67 04" );	// fsub dword ptr [edi+4]
			EmitString( "D9 1F" );		// fstp dword ptr [edi]
			break;
		case OP_DIVF:
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			EmitString( "D9 07" );		// fld dword ptr [edi]
			EmitString( "D8 77 04" );	// fdiv dword ptr [edi+4]
			EmitString( "D9 1F" );		// fstp dword ptr [edi]
			break;
		case OP_MULF:
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			EmitString( "D9 07" );		// fld dword ptr [edi]
			EmitString( "D8 4f 04" );	// fmul dword ptr [edi+4]
			EmitString( "D9 1F" );		// fstp dword ptr [edi]
			break;
		case OP_CVIF:
			EmitString( "DB 07" );		// fild dword ptr [edi]
			EmitString( "D9 1F" );		// fstp dword ptr [edi]
			break;
		case OP_CVFI:
#ifndef FTOL_PTR // WHENHELLISFROZENOVER  // bk001213 - was used in 1.17
			// not IEEE compliant, but simple and fast
			EmitString( "D9 07" );		// fld dword ptr [edi]
			EmitString( "DB 1F" );		// fistp dword ptr [edi]
#else // FTOL_PTR
			// call the library conversion function
			EmitString( "D9 07" );		// fld dword ptr [edi]
			EmitString( "FF 15" );		// call ftolPtr
			Emit4( (int)&ftolPtr );
			EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
#endif
			break;
		case OP_SEX8:
			EmitString( "0F BE 07" );	// movsx eax, byte ptr [edi]
			EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
			break;
		case OP_SEX16:
			EmitString( "0F BF 07" );	// movsx eax, word ptr [edi]
			EmitCommand(LAST_COMMAND_MOV_EDI_EAX);		// mov dword ptr [edi], eax
			break;

		case OP_BLOCK_COPY:
			// FIXME: range check
			EmitString( "56" );			// push esi
			EmitString( "57" );			// push edi
			EmitString( "8B 37" );		// mov esi,[edi] 
			EmitString( "8B 7F FC" );	// mov edi,[edi-4] 
			EmitString( "B9" );			// mov ecx,0x12345678
			Emit4( Constant4() >> 2 );
			EmitString( "B8" );			// mov eax, datamask
			Emit4( vm->dataMask );
			EmitString( "BB" );			// mov ebx, database
			Emit4( (int)vm->dataBase );
			EmitString( "23 F0" );		// and esi, eax
			EmitString( "03 F3" );		// add esi, ebx
			EmitString( "23 F8" );		// and edi, eax
			EmitString( "03 FB" );		// add edi, ebx
			EmitString( "F3 A5" );		// rep movsd
			EmitString( "5F" );			// pop edi
			EmitString( "5E" );			// pop esi
			EmitCommand(LAST_COMMAND_SUB_DI_8);		// sub edi, 8
			break;

		case OP_JUMP:
			EmitCommand(LAST_COMMAND_SUB_DI_4);		// sub edi, 4
			EmitString( "8B 47 04" );	// mov eax,dword ptr [edi+4]
			// FIXME: range check
			EmitString( "FF 24 85" );	// jmp dword ptr [instructionPointers + eax * 4]
			Emit4( (int)vm->instructionPointers );
			break;
		default:
			Com_Error( ERR_DROP, "VM_CompileX86: bad opcode %i at offset %i", op, pc );
		}
		pop0 = pop1;
		pop1 = op;
	}
	}

	// copy to an exact size buffer on the hunk
	vm->codeLength = compiledOfs;
	vm->codeBase = (unsigned char *)Hunk_Alloc( compiledOfs, h_low );
	Com_Memcpy( vm->codeBase, buf, compiledOfs );
	Z_Free( buf );
	Z_Free( jused );
	Com_Printf( "VM file %s compiled to %i bytes of code\n", vm->name, compiledOfs);

	// offset all the instruction pointers for the new location
	for ( i = 0 ; i < header->instructionCount ; i++ ) {
		vm->instructionPointers[i] += (int)vm->codeBase;
	}

#if 0 // ndef _WIN32
	// Must make the newly generated code executable
	{
		int r;
		unsigned long addr;
		int psize = getpagesize();

		addr = ((int)vm->codeBase & ~(psize-1)) - psize;

		r = mprotect((char*)addr, vm->codeLength + (int)vm->codeBase - addr + psize, 
			PROT_READ | PROT_WRITE | PROT_EXEC );

		if (r < 0)
			Com_Error( ERR_FATAL, "mprotect failed to change PROT_EXEC" );
	}
#endif

}
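
A note on the disabled block above: mprotect requires a page-aligned address, which is why the code rounds vm->codeBase down to a page boundary (and pads by an extra page) before changing the protection. A minimal standalone sketch of the same idea, assuming POSIX mprotect/sysconf (the function name is illustrative, not part of this source):

#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>

/* Sketch: make a JIT buffer executable. Rounds the start down to a
 * page boundary and widens the length to still cover the full range. */
static int make_code_executable(void *code, size_t length)
{
	uintptr_t page  = (uintptr_t)sysconf(_SC_PAGESIZE);
	uintptr_t start = (uintptr_t)code & ~(page - 1);

	return mprotect((void *)start, length + ((uintptr_t)code - start),
	                PROT_READ | PROT_WRITE | PROT_EXEC);
}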
Exemplo n.º 16
0
internal_function
new_heap (size_t size, size_t top_pad)
{
  size_t page_mask = GLRO (dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if (size + top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if (size + top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if (size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if (aligned_heap_area)
    {
      p2 = (char *) MMAP (aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
                          MAP_NORESERVE);
      aligned_heap_area = NULL;
      if (p2 != MAP_FAILED && ((unsigned long) p2 & (HEAP_MAX_SIZE - 1)))
        {
          munmap (p2, HEAP_MAX_SIZE);
          p2 = MAP_FAILED;
        }
    }
  if (p2 == MAP_FAILED)
    {
      p1 = (char *) MMAP (0, HEAP_MAX_SIZE << 1, PROT_NONE, MAP_NORESERVE);
      if (p1 != MAP_FAILED)
        {
          p2 = (char *) (((unsigned long) p1 + (HEAP_MAX_SIZE - 1))
                         & ~(HEAP_MAX_SIZE - 1));
          ul = p2 - p1;
          if (ul)
            munmap (p1, ul);
          else
            aligned_heap_area = p2 + HEAP_MAX_SIZE;
          munmap (p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
        }
      else
        {
          /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
             is already aligned. */
          p2 = (char *) MMAP (0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
          if (p2 == MAP_FAILED)
            return 0;

          if ((unsigned long) p2 & (HEAP_MAX_SIZE - 1))
            {
              munmap (p2, HEAP_MAX_SIZE);
              return 0;
            }
        }
    }
  if (mprotect (p2, size, PROT_READ | PROT_WRITE) != 0)
    {
      munmap (p2, HEAP_MAX_SIZE);
      return 0;
    }
  h = (heap_info *) p2;
  h->size = size;
  h->mprotect_size = size;
  /* LIBC_PROBE (memory_heap_new, 2, h, h->size); */
  return h;
}
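
The PROT_NONE over-map in new_heap is the portable way to get an alignment stronger than the page size out of plain mmap: reserve twice the needed span, then unmap the misaligned head and tail. A standalone sketch of the trick under the same assumptions (align is a power of two and a multiple of the page size; the helper name is illustrative):

#include <stdint.h>
#include <sys/mman.h>

/* Sketch: reserve 'size' bytes aligned to 'align' by over-mapping with
   PROT_NONE and trimming the excess, as new_heap does above. */
static void *
mmap_aligned (size_t size, size_t align)
{
  char *p = mmap (NULL, size + align, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (p == MAP_FAILED)
    return NULL;

  char *aligned = (char *) (((uintptr_t) p + align - 1) & ~(align - 1));
  if (aligned > p)
    munmap (p, aligned - p);                      /* trim misaligned head */
  munmap (aligned + size, align - (size_t) (aligned - p));   /* trim tail */
  return aligned;        /* still PROT_NONE; mprotect what you commit */
}

As in new_heap, the caller then flips just the committed prefix to PROT_READ | PROT_WRITE with mprotect.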
Exemplo n.º 17
0
static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}
Exemplo n.º 18
0
bool
MappableSeekableZStream::ensure(const void *addr)
{
    DEBUG_LOG("ensure @%p", addr);
    const void *addrPage = PageAlignedPtr(addr);
    /* Find the mapping corresponding to the given page */
    std::vector<LazyMap>::iterator map;
    for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
        if (map->Contains(addrPage))
            break;
    }
    if (map == lazyMaps.end())
        return false;

    /* Find corresponding chunk */
    off_t mapOffset = map->offsetOf(addrPage);
    off_t chunk = mapOffset / zStream.GetChunkSize();

    /* In the typical case, we just need to decompress the chunk entirely. But
     * when the current mapping ends in the middle of the chunk, we want to
     * stop at the end of the corresponding page.
     * However, if another mapping needs the last part of the chunk, we still
     * need to continue. As mappings are ordered by offset and length, we don't
     * need to scan the entire list of mappings.
     * It is safe to run through lazyMaps here because the linker is never
     * going to call mmap (which adds lazyMaps) while this function is
     * called. */
    size_t length = zStream.GetChunkSize(chunk);
    off_t chunkStart = chunk * zStream.GetChunkSize();
    off_t chunkEnd = chunkStart + length;
    std::vector<LazyMap>::iterator it;
    for (it = map; it < lazyMaps.end(); ++it) {
        if (chunkEnd <= it->endOffset())
            break;
    }
    if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
        /* The mapping "it" points at now is past the interesting one */
        --it;
        length = it->endOffset() - chunkStart;
    }

    length = PageAlignedSize(length);

    /* The following lock can be re-acquired by the thread holding it.
     * If this happens, it means the following code is interrupted somehow by
     * some signal, and ends up retriggering a chunk decompression for the
     * same MappableSeekableZStream.
     * If the chunk to decompress is different the second time, then everything
     * is safe as the only common data touched below is chunkAvailNum, and it is
     * atomically updated (leaving out any chance of an interruption while it is
     * updated affecting the result). If the chunk to decompress is the same, the
     * worst thing that can happen is chunkAvailNum being incremented one too
     * many times, which doesn't affect functionality. Since the chances of
     * that happening are pretty slim and the effect is harmless, we can just
     * ignore the issue; other than that, we'd only be wasting time
     * decompressing the same chunk twice. */
    AutoLock lock(&mutex);

    /* The very first page is mapped and accessed separately from the rest, and
     * as such, only the first page of the first chunk is decompressed this way.
     * When we fault in the remaining pages of that chunk, we want to decompress
     * the complete chunk again. Short of doing that, we would end up with
     * no data between PageSize() and chunkSize, which would effectively corrupt
     * symbol resolution in the underlying library. */
    if (chunkAvail[chunk] < PageNumber(length)) {
        if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
            return false;

#if defined(ANDROID) && defined(__arm__)
        if (map->prot & PROT_EXEC) {
            /* We just extracted data that may be executed in the future.
             * We thus need to ensure Instruction and Data cache coherency. */
            DEBUG_LOG("cacheflush(%p, %p)", *buffer + chunkStart, *buffer + (chunkStart + length));
            cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                       reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)), 0);
        }
#endif
        /* Only count if we haven't already decompressed parts of the chunk */
        if (chunkAvail[chunk] == 0)
            chunkAvailNum++;

        chunkAvail[chunk] = PageNumber(length);
    }

    /* Flip the chunk mapping protection to the recorded flags. We could
     * also flip the protection for other mappings of the same chunk,
     * but it's easier to skip that and let further segfaults call
     * ensure again. */
    const void *chunkAddr = reinterpret_cast<const void *>
                            (reinterpret_cast<uintptr_t>(addrPage)
                             - mapOffset % zStream.GetChunkSize());
    const void *chunkEndAddr = reinterpret_cast<const void *>
                               (reinterpret_cast<uintptr_t>(chunkAddr) + length);

    const void *start = std::max(map->addr, chunkAddr);
    const void *end = std::min(map->end(), chunkEndAddr);
    length = reinterpret_cast<uintptr_t>(end)
             - reinterpret_cast<uintptr_t>(start);

    DEBUG_LOG("mprotect @%p, 0x%" PRIxSize ", 0x%x", start, length, map->prot);
    if (mprotect(const_cast<void *>(start), length, map->prot) == 0)
        return true;

    LOG("mprotect failed");
    return false;
}
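
PageAlignedPtr and PageAlignedSize are defined elsewhere in this linker; a plausible minimal sketch of the rounding they perform (an assumption, not code taken from the snippet):

#include <stdint.h>
#include <unistd.h>

/* Sketch: round an address down, and a size up, to page granularity,
 * matching how ensure() uses these helpers above. */
static const void *page_aligned_ptr(const void *ptr)
{
    uintptr_t mask = (uintptr_t)sysconf(_SC_PAGESIZE) - 1;
    return (const void *)((uintptr_t)ptr & ~mask);
}

static size_t page_aligned_size(size_t size)
{
    uintptr_t mask = (uintptr_t)sysconf(_SC_PAGESIZE) - 1;
    return (size + mask) & ~mask;
}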
Exemplo n.º 19
0
int checkMutatedFile(){


        int       cnt,fd;
        char execStr[256];
	int retVal = 0;
        void *result;
	unsigned int mmapAddr;
	int pageSize;
	Address dataAddress;
	int dataSize;
       	char* tmpPtr;
        unsigned int updateAddress, updateSize, updateOffset;
        unsigned int *dataPtr;
 	unsigned int numberUpdates,i ;
	char* oldPageData;
	int sawFirstHeapTrampSection = 0;

	struct scnhdr *currScnhdr, *firstScnhdr;
	struct xcoffhdr *XCOFFhdr;
	char *data;
	void *XCOFFfile;
	unsigned int *numbNewHdrs;
	snprintf(execStr,255,"%s/dyninst_mutatedBinary",getenv("PWD"));

	int * trampGu=0;
	int trampGuSize;
	/*fprintf(stderr,"1SBRK %x (0x%x)\n",sbrk(0),&checkMutatedFile);*/
	XCOFFfile = loadFile(execStr, &fd);

	if(!XCOFFfile){
		/*printf("Cannot restore mutated binary\n\n");*/
		return 0;
	}

	/*fprintf(stderr," restoring mutated binary...");*/

	XCOFFhdr = (struct xcoffhdr*) XCOFFfile;


	/* 	WHAT IS GOING ON HERE?
		I cannot put the extra data (tramps, highmem data, shared lib info)
		in the middle of the file as in the ELF stuff.  I must place it at
		the end, AFTER the symbol table and string table.  Really this is
		like having two XCOFF files stuck together.  The loader only pays
		attention to the first one (ie the one referred to by the file header
		info).  The stuff tacked on at the end is as follows:

		number of sections
		section header
		section header
		...
		section data
		section data
		...

		I need to find the end of the string table and then pull out the
		number of section headers tacked on to the end.  Then I can
		go through the section data a section at a time.
		
	*/

	/* find end of symbol table */
	firstScnhdr = (struct scnhdr*) ( ((char*) XCOFFfile) + XCOFFhdr->filehdr.f_symptr +
			XCOFFhdr->filehdr.f_nsyms * 18); /* 18 == sizeof symbol */
	/*find end of string table*/
	numbNewHdrs = (unsigned int*) ( ((char*) firstScnhdr ) + *((unsigned int*) firstScnhdr));
		/*(struct scnhdr*)  ( (char*) XCOFFfile + sizeof(struct xcoffhdr));*/

	firstScnhdr = (struct scnhdr *) ( sizeof(unsigned int) + ((char*) firstScnhdr ) + *((unsigned int*) firstScnhdr));

	currScnhdr = firstScnhdr;

	pageSize =  getpagesize();

	/*fprintf(stderr,"SBRK 0x%x 0x%x\n",XCOFFfile,sbrk(0));*/
	
   	for(currScnhdr = firstScnhdr, cnt=0; cnt < *numbNewHdrs; currScnhdr++, cnt++){
		if(!strcmp( currScnhdr->s_name, "trampg")){

			/*fprintf(stderr," >trampg 0x%x\n", currScnhdr->s_paddr);*/
			/*munmap(freeaddr,pageSize);
			if( currScnhdr->s_paddr != mmap(currScnhdr->s_paddr, currScnhdr->s_size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_ANONYMOUS, -1,0x0)){
				perror("MMAP");
			}


			*(int*)currScnhdr->s_paddr=1;*/
			trampGu = (int*)currScnhdr->s_paddr;
			trampGuSize = currScnhdr->s_size;

		}else if(!strcmp( currScnhdr->s_name, "dyn_lib")){
			/* use dlopen to load a list of shared libraries */

			int len;
                        void *old_brk, *new_brk;

			data = (char*) XCOFFfile + currScnhdr->s_scnptr;
                        memcpy( &new_brk, data + strlen(data) + 1, sizeof(void *));
			while(*data != '\0'){
                           if ((old_brk = sbrk(0)) > new_brk) {
                              printf("current BRK 0x%p > desired BRK 0x%p for %s!\n",
                                     old_brk,
                                     new_brk,
                                     data);
                              fflush(stdout);
                           } else {
                              brk(new_brk);
                           }
				DYNINSTloadLibrary(data);
				data += (strlen(data) +1 + sizeof(void *));
			}
		}else if(!strcmp( currScnhdr->s_name, "dyn_dat")){
			/* reload data */
			tmpPtr =  (char*) XCOFFfile + currScnhdr->s_scnptr;
			dataAddress = -1;
			while( dataAddress != 0 ) { 
				dataSize = *(int*) tmpPtr;
				tmpPtr+=sizeof(int);
				dataAddress = *(Address*) tmpPtr;
				tmpPtr += sizeof(Address);
				if(dataAddress){
					memcpy((char*) dataAddress, tmpPtr, dataSize);
					tmpPtr += dataSize;
					//fprintf(stderr,"dyn_dat: 0x%x (0x%x)\n",dataAddress,dataSize);
				}
			}
			retVal = 1;

		}else if(!strncmp( currScnhdr->s_name, "dyn_",4)){

			/* THIS SHOULD NEVER OCCUR!*/

			/* heap tramps */
			char *tmpStr;
			printf("%s\n",  currScnhdr->s_name);
			retVal = 1; /* this is a restored run */

			tmpStr = &currScnhdr->s_name[4]; 
			if( *tmpStr>=0x30 && *tmpStr <= 0x39 ) {
				/* this is a heap tramp section */
				if( sawFirstHeapTrampSection ){

					result = mmap((void*) currScnhdr->s_vaddr, currScnhdr->s_size, 
					PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS| MAP_FIXED,-1,0);
					if( result == (void *) -1){

						mprotect((void*) currScnhdr->s_vaddr, currScnhdr->s_size,
							PROT_READ|PROT_WRITE|PROT_EXEC);
						result = mmap((void*) currScnhdr->s_vaddr, currScnhdr->s_size, 
						PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS| MAP_FIXED,-1,0);
						if( result == (void *) -1){
						perror("TRYING TO MMAP:");
						switch(errno){
							case EACCES:
								printf(" EACCES \n");
								break;
							case EBADF:
								printf(" EBADF \n");
								break;
							case EINVAL:
								printf(" EINVAL\n");
								break;
							case ENOMEM:
								printf(" NO MEM!\n");
								break;
							default:
								printf(" OTHER ERROR %d\n",errno);
						}
						}
					}

					printf(" MMAP! vaddr %x size %x scnptr %x : result %p",currScnhdr->s_vaddr,currScnhdr->s_size,currScnhdr->s_scnptr, result );
					fflush(stdout);

					memcpy((void *) currScnhdr->s_vaddr, ((char*) XCOFFfile) + currScnhdr->s_scnptr,
						currScnhdr->s_size);
/*					result = mmap((void*) currScnhdr->s_vaddr, currScnhdr->s_size, 
					PROT_READ|PROT_WRITE|PROT_EXEC,
					MAP_FIXED|MAP_PRIVATE,fd,currScnhdr->s_scnptr);
					printf(" RESULT! %x", result);*/
				}else{

					printf(" mmecpy! %x %x %x",currScnhdr->s_vaddr,currScnhdr->s_size,
						((char*) XCOFFfile) + currScnhdr->s_scnptr );
					fflush(stdout);

					/*memcpy(currScnhdr->s_vaddr, ((char*) XCOFFfile) + currScnhdr->s_scnptr, currScnhdr->s_size);*/
					sawFirstHeapTrampSection = 1;
				}
			}

		}else if(!strncmp( currScnhdr->s_name, "dyH_", 4)){
			/* high mem tramps */

			/*the layout of dyninstAPIhighmem_ is:
			pageData
			address of update
			size of update
			...	
			address of update
			size of update	
			number of updates
	
			we must ONLY overwrite the updates; the other
			areas of the page may be important (and different from
			the saved data in the file).  we first copy out the
			page, then apply the updates to it, and then
			write it back.
			*/

			int oldPageDataSize;
			int dataSize = currScnhdr->s_size;
			/*Dl_info dlip;*/

			retVal = 1; /* just to be sure */

			data = (char*) XCOFFfile + currScnhdr->s_scnptr	;
			
			numberUpdates = (unsigned int) ( ((unsigned int*) data)[
				(dataSize - sizeof(unsigned int))/ sizeof(unsigned int) ]);
			oldPageDataSize = dataSize-((2*numberUpdates+1)*
				sizeof(unsigned int)) ;
			oldPageData = (char*) malloc(oldPageDataSize);
			/*copy old page data */

			/* probe memory to see if we own it */
			checkAddr = 1;/*dladdr((void*)currScnhdr->s_vaddr, &dlip); */

			updateSize  = dataSize-((2*numberUpdates+1)* sizeof(unsigned int));

            /* Actually have a checkAddr... */
            if (currScnhdr->s_vaddr >= DYNINSTheap_loAddr &&
                currScnhdr->s_vaddr <= DYNINSTheap_hiAddr)
                checkAddr = 0;
            
			if(!checkAddr){ 
				/* we don't own it, mmap it! */
                int fdzero = open("/dev/zero", O_RDWR);
                mmapAddr = 0;
                mmapAddr =(unsigned int) mmap((void*) currScnhdr->s_vaddr,
                                              oldPageDataSize,
                                              PROT_READ|PROT_WRITE|PROT_EXEC,
                                              MAP_FIXED|MAP_PRIVATE,
                                              fdzero,
                                              0);
                close(fdzero);
				mmapAddr = (unsigned int) memcpy(oldPageData,
                                  (void*)currScnhdr->s_vaddr ,
                                  updateSize);
                checkAddr = 1;
			}else{
				/*we own it, finish the memcpy */
				mmapAddr = (unsigned int) memcpy(oldPageData, (void*)currScnhdr->s_vaddr , updateSize);
				checkAddr = 1; 
			}
	
			dataPtr =(unsigned int*) &(data[oldPageDataSize]);	
			/*apply updates*/
			for(i = 0; i< numberUpdates; i++){
				updateAddress = *dataPtr; 
				updateSize = *(++dataPtr);

				updateOffset = updateAddress -  currScnhdr->s_vaddr;
				/*do update*/	
				memcpy(&( oldPageData[updateOffset]),
						&(data[updateOffset]) , updateSize);	
				dataPtr ++;
			} 
			if(!checkAddr){
				mmapAddr =  currScnhdr->s_scnptr;
				mmapAddr =(unsigned int) mmap((void*) currScnhdr->s_vaddr,oldPageDataSize, 
					PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED| MAP_PRIVATE,fd,mmapAddr);

			}else{
				if( currScnhdr->s_vaddr < 0xd0000000){
				/*mmapAddr = currScnhdr->s_vaddr - (currScnhdr->s_vaddr %pageSize);

				mmapAddr = mmap(mmapAddr,currScnhdr->s_vaddr - mmapAddr + oldPageDataSize, 
					PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANONYMOUS|MAP_FIXED |MAP_PRIVATE,-1,0);
				
				perror(" MMAP ");
				printf(" MMAP: %x \n", mmapAddr);
				*/
				mmapAddr = currScnhdr->s_vaddr - (currScnhdr->s_vaddr %pageSize);

				checkAddr = mprotect((void *)mmapAddr, currScnhdr->s_vaddr - mmapAddr + oldPageDataSize, 
					PROT_READ|PROT_WRITE|PROT_EXEC);
				/*printf(" MPROTECT: %x %lx : %lx \n", checkAddr, mmapAddr,currScnhdr->s_vaddr - mmapAddr + oldPageDataSize
 );*/

				memcpy((void*)currScnhdr->s_vaddr, oldPageData,oldPageDataSize );
				}
			}
                        free(oldPageData);
		}

	}

	/*fprintf(stderr,"SBRK %x\n",sbrk(0));*/
	fflush(stdout);
        close(fd);
	munmap(XCOFFfile,fileSize);

	munmap(freeaddr,pageSize);
	result = mmap(trampGu, trampGuSize, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_ANONYMOUS, -1,0x0);
	if( result != trampGu){
		fprintf(stderr,"TrampGuards not reallocated correctly.  Mutated binary may not perform properly\n");
	}
	for(i=0; i< trampGuSize /sizeof(int); i++){
		trampGu[i]=1;
	}

	return retVal;
}
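
The dyH_ trailer layout spelled out in the comment above (page data, then (address, size) update records, then a trailing record count) can be walked by a small helper; a sketch under those layout assumptions (the function is illustrative, not part of this source):

#include <string.h>

/* Sketch: apply the update records trailing a dyH_ section to a saved,
 * writable copy of the page. 'data' points at the section contents
 * (s_size bytes) and 'vaddr' is the section's s_vaddr. */
static void apply_dyh_updates(char *page, const char *data,
			      unsigned int dataSize, unsigned int vaddr)
{
	const unsigned int *words = (const unsigned int *) data;
	unsigned int n = words[dataSize / sizeof(unsigned int) - 1];
	unsigned int pageBytes = dataSize - (2 * n + 1) * sizeof(unsigned int);
	const unsigned int *rec = (const unsigned int *) (data + pageBytes);
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int addr = *rec++;
		unsigned int size = *rec++;
		memcpy(page + (addr - vaddr), data + (addr - vaddr), size);
	}
}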
Exemplo n.º 20
0
File: pool.c Project: mslusarz/nvml
/*
 * pool_params_parse -- parse pool type, file size and block size
 */
static int
pool_params_parse(const PMEMpoolcheck *ppc, struct pool_params *params,
	int check)
{
	LOG(3, NULL);
	int is_btt = ppc->args.pool_type == PMEMPOOL_POOL_TYPE_BTT;

	params->type = POOL_TYPE_UNKNOWN;
	params->is_poolset = util_is_poolset_file(ppc->path) == 1;

	int fd = util_file_open(ppc->path, NULL, 0, O_RDONLY);
	if (fd < 0)
		return -1;

	int ret = 0;

	util_stat_t stat_buf;
	ret = util_fstat(fd, &stat_buf);
	if (ret)
		goto out_close;

	ASSERT(stat_buf.st_size >= 0);

	params->mode = stat_buf.st_mode;

	struct pool_set *set;
	void *addr;
	if (params->is_poolset) {
		/*
		 * Need to close the poolset because it will be opened with
		 * flock in the following instructions.
		 */
		close(fd);
		fd = -1;

		if (check) {
			if (pool_set_map(ppc->path, &set, 0))
				return -1;
		} else {
			ret = util_poolset_create_set(&set, ppc->path, 0, 0);
			if (ret < 0) {
				LOG(2, "cannot open pool set -- '%s'",
					ppc->path);
				return -1;
			}
			if (set->remote) {
				ERR("poolsets with remote replicas are not "
					"supported");
				return -1;
			}
			if (util_pool_open_nocheck(set, 0))
				return -1;
		}

		params->size = set->poolsize;
		addr = set->replica[0]->part[0].addr;

		/*
		 * XXX mprotect for device dax with length not aligned to its
		 * page granularity causes SIGBUS on the next page fault.
		 * The length argument of this call should be changed to
		 * set->poolsize once the kernel issue is solved.
		 */
		if (mprotect(addr, set->replica[0]->repsize,
			PROT_READ) < 0) {
			ERR("!mprotect");
			goto out_unmap;
		}
		params->is_device_dax = set->replica[0]->part[0].is_dax;
	} else if (is_btt) {
		params->size = (size_t)stat_buf.st_size;
#ifndef _WIN32
		if (params->mode & S_IFBLK)
			if (ioctl(fd, BLKGETSIZE64, &params->size)) {
				ERR("!ioctl");
				goto out_close;
			}
#endif
		addr = NULL;
	} else {
		ssize_t s = util_file_get_size(ppc->path);
		if (s < 0) {
			ret = -1;
			goto out_close;
		}
		params->size = (size_t)s;
		addr = mmap(NULL, (uint64_t)params->size, PROT_READ,
			MAP_PRIVATE, fd, 0);
		if (addr == MAP_FAILED) {
			ret = -1;
			goto out_close;
		}
		params->is_device_dax = util_file_is_device_dax(ppc->path);
	}

	/* stop processing for BTT device */
	if (is_btt) {
		params->type = POOL_TYPE_BTT;
		params->is_part = false;
		goto out_close;
	}

	struct pool_hdr hdr;
	memcpy(&hdr, addr, sizeof(hdr));
	util_convert2h_hdr_nocheck(&hdr);
	pool_params_from_header(params, &hdr);

	if (ppc->args.pool_type != PMEMPOOL_POOL_TYPE_DETECT) {
		enum pool_type declared_type =
			pmempool_check_type_to_pool_type(ppc->args.pool_type);
		if ((params->type & ~declared_type) != 0) {
			ERR("declared pool type does not match");
			ret = 1;
			goto out_unmap;
		}
	}

	if (params->type == POOL_TYPE_BLK) {
		struct pmemblk pbp;
		memcpy(&pbp, addr, sizeof(pbp));
		params->blk.bsize = le32toh(pbp.bsize);
	} else if (params->type == POOL_TYPE_OBJ) {
		struct pmemobjpool pop;
		memcpy(&pop, addr, sizeof(pop));
		memcpy(params->obj.layout, pop.layout,
			PMEMOBJ_MAX_LAYOUT);
	}

out_unmap:
	if (params->is_poolset) {
		ASSERTeq(fd, -1);
		ASSERTne(addr, NULL);
		util_poolset_close(set, 0);
	} else if (!is_btt) {
		ASSERTne(fd, -1);
		ASSERTne(addr, NULL);
		munmap(addr, params->size);
	}
out_close:
	if (fd != -1)
		close(fd);
	return ret;
}
Exemplo n.º 21
0
/*
 * test_mprotect_anon -- test memory protection on anonymous mappings
 */
static void
test_mprotect_anon(void)
{
	char *ptr1;
	char *ptr2;

	/* unknown PROT flag - should succeed */
	ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_ALL + 1), 0);
	check_access(ptr1, MMAP_SIZE, PROT_NONE);
	UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);

	/* len == 0 - should succeed */
	ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
			MAP_SHARED|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_ASSERTeq(mprotect(ptr1, 0, PROT_READ), 0);
	check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
	UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);

	/* change protection: R/O => R/W */
	ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
#ifndef _WIN32
	UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE), 0);
	check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
	UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
#else
	/* XXX - not supported yet */
	UT_ASSERTne(mprotect(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE), 0);
	check_access(ptr1, MMAP_SIZE, PROT_READ);
	UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
#endif

	/* change protection; R/W => R/O */
	ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
			MAP_SHARED|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_READ), 0);
	check_access(ptr1, MMAP_SIZE, PROT_READ);
	UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);

	/* change protection; R/W => none */
	ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_NONE), 0);
	check_access(ptr1, MMAP_SIZE, PROT_NONE);
	UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);

	/* unaligned pointer - should fail */
	ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
			MAP_SHARED|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	errno = 0;
	UT_ASSERTne(mprotect(ptr1 + 100, MMAP_SIZE, PROT_READ), 0);
	UT_ASSERTeq(errno, EINVAL);
	check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
	UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);

	/* invalid pointer - should fail */
	errno = 0;
	UT_ASSERTne(mprotect(ptr1, MMAP_SIZE, PROT_READ), 0);
	UT_ASSERTeq(errno, ENOMEM);

	/* unaligned len - should succeed */
	ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_ASSERTeq(mprotect(ptr1, PAGE_SIZE + 100, PROT_READ), 0);
	check_access(ptr1, PAGE_SIZE * 2, PROT_READ);
	check_access(ptr1 + PAGE_SIZE * 2, FILE_SIZE - PAGE_SIZE * 2,
			PROT_READ|PROT_WRITE);
	UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);

	/* partial protection change (on page boundary) */
	ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
			MAP_SHARED|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_ASSERTeq(mprotect(ptr1 + PAGE_SIZE, PAGE_SIZE, PROT_READ), 0);
	UT_ASSERTeq(mprotect(ptr1 + PAGE_SIZE * 2, PAGE_SIZE, PROT_NONE), 0);
	check_access(ptr1, PAGE_SIZE, PROT_READ|PROT_WRITE);
	check_access(ptr1 + PAGE_SIZE, PAGE_SIZE, PROT_READ);
	check_access(ptr1 + PAGE_SIZE * 2, PAGE_SIZE, PROT_NONE);
	check_access(ptr1 + PAGE_SIZE * 3, FILE_SIZE - PAGE_SIZE * 3,
			PROT_READ|PROT_WRITE);
	UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);

	/* range includes invalid addresses - should fail */
	ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
			MAP_SHARED|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE, MMAP_SIZE), 0);
	UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE * 3, MMAP_SIZE), 0);
	check_access(ptr1 + MMAP_SIZE, MMAP_SIZE, PROT_NONE);
	check_access(ptr1 + MMAP_SIZE * 3, MMAP_SIZE, PROT_NONE);

	errno = 0;
	UT_ASSERTne(mprotect(ptr1, MMAP_SIZE * 4, PROT_READ), 0);
	UT_ASSERTeq(errno, ENOMEM);
#ifndef _WIN32
	/* protection changed for all the pages up to the first invalid */
	check_access(ptr1, MMAP_SIZE, PROT_READ);
	check_access(ptr1 + MMAP_SIZE * 2, MMAP_SIZE, PROT_READ|PROT_WRITE);
#else
	/* XXX - protection changed for all the valid pages */
	check_access(ptr1, MMAP_SIZE, PROT_READ);
	check_access(ptr1 + MMAP_SIZE * 2, MMAP_SIZE, PROT_READ);
#endif
	UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);

	/* change protection on two adjacent mappings */
	ptr1 = mmap(ptr1, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
			MAP_SHARED|MAP_ANON, -1, 0);
	UT_ASSERTne(ptr1, MAP_FAILED);
	ptr2 = mmap(ptr1 + MMAP_SIZE * 2, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
			MAP_SHARED|MAP_ANON, -1, MMAP_SIZE * 2);
	UT_ASSERTeq(ptr2, ptr1 + MMAP_SIZE * 2);
	UT_ASSERTeq(mprotect(ptr1 + MMAP_SIZE, MMAP_SIZE * 2, PROT_NONE), 0);
	check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
	check_access(ptr1 + MMAP_SIZE, MMAP_SIZE * 2, PROT_NONE);
	check_access(ptr1 + MMAP_SIZE * 3, MMAP_SIZE, PROT_READ|PROT_WRITE);
	UT_ASSERTeq(munmap(ptr1, MMAP_SIZE * 4), 0);
}
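
check_access is provided by the test harness and not shown in this snippet; one plausible way to probe a page's effective protection is to attempt each access under a fault handler, sketched below with sigsetjmp (assumptions: POSIX signals and single-threaded test code; this is not the harness's actual implementation):

#include <setjmp.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>

static sigjmp_buf probe_env;

static void probe_fault(int sig)
{
	(void)sig;
	siglongjmp(probe_env, 1);
}

/* Sketch: return the subset of PROT_READ|PROT_WRITE that 'addr'
 * currently allows, by trying each access and catching SIGSEGV. */
static int probe_prot(volatile char *addr)
{
	struct sigaction sa, old;
	int prot = PROT_NONE;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = probe_fault;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, &old);

	if (sigsetjmp(probe_env, 1) == 0) {
		(void)*addr;	/* faults unless readable */
		prot |= PROT_READ;
	}
	if (sigsetjmp(probe_env, 1) == 0) {
		*addr = *addr;	/* faults unless writable (and readable) */
		prot |= PROT_WRITE;
	}

	sigaction(SIGSEGV, &old, NULL);
	return prot;
}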
Exemplo n.º 22
0
void jit_compile_execute_main(void)
{
	unsigned const alignment  = 16;
	unsigned const align_mask = ~(alignment-1);

	optimize_lower_ir_prog();

	/* This is somewhat ad-hoc testing code for the jit; it is limited and
	 * will not handle all firm programs. */

	const struct {
		char const *name;
		void const *func;
	} external_functions[] = {
		{ "atoi",    (void const*)(intptr_t)atoi    },
		{ "free",    (void const*)(intptr_t)free    },
		{ "getchar", (void const*)(intptr_t)getchar },
		{ "malloc",  (void const*)(intptr_t)malloc  },
		{ "printf",  (void const*)(intptr_t)printf  },
		{ "puts",    (void const*)(intptr_t)puts    },
		{ "rand",    (void const*)(intptr_t)rand    },
		{ "realloc", (void const*)(intptr_t)realloc },
	};

	ir_jit_segment_t   *const segment   = be_new_jit_segment();
	ir_entity         **      funcents  = NEW_ARR_F(ir_entity*, 0);
	ir_jit_function_t **      functions = NEW_ARR_F(ir_jit_function_t*, 0);
	ir_entity          *main_entity     = NULL;

	size_t         code_size   = 0;
	ir_type *const global_type = get_glob_type();
	for (size_t i = 0, n = get_compound_n_members(global_type); i < n; ++i) {
		ir_entity  *const entity  = get_compound_member(global_type, i);
		char const *const ld_name = get_entity_ld_name(entity);

		if (is_method_entity(entity)
		 && !(get_entity_linkage(entity) & IR_LINKAGE_NO_CODEGEN)) {
			ir_graph *const irg = get_entity_irg(entity);
			if (irg != NULL) {
				ir_jit_function_t *const func = be_jit_compile(segment, irg);
				if (func == NULL)
					panic("Could not jit compile '%s'", ld_name);
				unsigned const misalign = alignment - (code_size%alignment);
				code_size += (misalign != alignment ? misalign : 0);

				ARR_APP1(ir_jit_function_t*, functions, func);
				ARR_APP1(ir_entity*, funcents, entity);
				code_size += be_get_function_size(func);
				if (streq(ld_name, "main") || streq(ld_name, "_main"))
					main_entity = entity;
				continue;
			}
		}

		/* See if its one of our well-known functions */
		for (unsigned f = 0; f < ARRAY_SIZE(external_functions); ++f) {
			char const *const name = external_functions[f].name;
			void const *const func = external_functions[f].func;
			if (streq(ld_name, name)
			 || (ld_name[0] == '_' && streq(&ld_name[1], name))) {
				be_jit_set_entity_addr(entity, func);
				break;
			}
		}
	}

	if (main_entity == NULL)
		panic("Could not find main() function");

	/* Allocate executable memory */
	char *const memory = mmap(0, code_size, PROT_READ | PROT_WRITE,
	                          MAP_PRIVATE | MAP_ANON, -1, 0);
	if (memory == MAP_FAILED)
		panic("Could not mmap memory");

	/* Determine final function addresses */
	size_t const n_functions = ARR_LEN(functions);
	assert(ARR_LEN(funcents) == n_functions);
	unsigned offset = 0;
	for (size_t i = 0; i < n_functions; ++i) {
		offset = (offset + alignment - 1) & align_mask;
		char      *const dest   = memory + offset;
		ir_entity *const entity = funcents[i];
		be_jit_set_entity_addr(entity, dest);
		offset += be_get_function_size(functions[i]);
	}
	assert(offset == code_size);

	/* Emit and resolve */
	for (size_t i = 0; i < n_functions; ++i) {
		ir_entity  *const entity = funcents[i];
		char       *const dest
			= memory + ((char const*)be_jit_get_entity_addr(entity) - memory);
		be_emit_function(dest, functions[i]);
	}
	be_destroy_jit_segment(segment);

	if (mprotect(memory, code_size, PROT_EXEC) != 0)
		panic("Couldn't make memory executable");

	typedef int (*mainfunc)(int argc, char **argv);
	mainfunc main_ptr = (mainfunc)(intptr_t)be_jit_get_entity_addr(main_entity);
	int res = main_ptr(0, NULL);
	fprintf(stderr, "Exit code: %d\n", res);

	munmap(memory, code_size);
}
Exemplo n.º 23
0
/*
 * TODO(bsy): handle the !NACL_ABI_MAP_FIXED case.
 */
uintptr_t NaClHostDescMap(struct NaClHostDesc *d,
                          struct NaClDescEffector *effp,
                          void                *start_addr,
                          size_t              len,
                          int                 prot,
                          int                 flags,
                          nacl_off64_t        offset) {
  int   desc;
  void  *map_addr;
  int   host_prot;
  int   tmp_prot;
  int   host_flags;
  int   need_exec;
  UNREFERENCED_PARAMETER(effp);

  NaClLog(4,
          ("NaClHostDescMap(0x%08"NACL_PRIxPTR", "
           "0x%08"NACL_PRIxPTR", 0x%08"NACL_PRIxS", "
           "0x%x, 0x%x, 0x%08"NACL_PRIx64")\n"),
          (uintptr_t) d,
          (uintptr_t) start_addr,
          len,
          prot,
          flags,
          (int64_t) offset);
  if (NULL == d && 0 == (flags & NACL_ABI_MAP_ANONYMOUS)) {
    NaClLog(LOG_FATAL, "NaClHostDescMap: 'this' is NULL and not anon map\n");
  }
  if (NULL != d && -1 == d->d) {
    NaClLog(LOG_FATAL, "NaClHostDescMap: already closed\n");
  }
  if ((0 == (flags & NACL_ABI_MAP_SHARED)) ==
      (0 == (flags & NACL_ABI_MAP_PRIVATE))) {
    NaClLog(LOG_FATAL,
            "NaClHostDescMap: exactly one of NACL_ABI_MAP_SHARED"
            " and NACL_ABI_MAP_PRIVATE must be set.\n");
  }

  prot &= NACL_ABI_PROT_MASK;

  if (flags & NACL_ABI_MAP_ANONYMOUS) {
    desc = -1;
  } else {
    desc = d->d;
  }
  /*
   * Translate prot, flags to host_prot, host_flags.
   */
  host_prot = NaClProtMap(prot);
  host_flags = NaClMapFlagMap(flags);

  NaClLog(4, "NaClHostDescMap: host_prot 0x%x, host_flags 0x%x\n",
          host_prot, host_flags);

  /*
   * In chromium-os, the /dev/shm and the user partition (where
   * installed apps live) are mounted no-exec, and a special
   * modification was made to the chromium-os version of the Linux
   * kernel to allow mmap to use files as backing store with
   * PROT_EXEC. The standard mmap code path will fail mmap requests
   * that ask for PROT_EXEC, but mprotect will allow changing the
   * permissions later. This retains most of the defense-in-depth
   * property of disallowing PROT_EXEC in mmap, but enables the use
   * case of getting executable code from a file without copying.
   *
   * See https://code.google.com/p/chromium/issues/detail?id=202321
   * for details of the chromium-os change.
   */
  tmp_prot = host_prot & ~PROT_EXEC;
  need_exec = (0 != (PROT_EXEC & host_prot));
  map_addr = mmap(start_addr, len, tmp_prot, host_flags, desc, offset);
  if (need_exec && MAP_FAILED != map_addr) {
    if (0 != mprotect(map_addr, len, host_prot)) {
      /*
       * Not being able to turn on PROT_EXEC is fatal: we have already
       * replaced the original mapping -- restoring them would be too
       * painful.  Without scanning /proc (disallowed by outer
       * sandbox) or Mach's vm_region call, there is no simple, direct
       * way to figure out what was there before.  On Linux
       * we could have mremap'd the old memory elsewhere, but still
       * would require probing to find the contiguous memory segments
       * within the original address range.  And restoring dirtied
       * pages on OSX the mappings for which had disappeared may well
       * be impossible (getting clean copies of the pages is feasible,
       * but insufficient).
       */
      NaClLog(LOG_FATAL,
              "NaClHostDescMap: mprotect to turn on PROT_EXEC failed,"
              " errno %d\n", errno);
    }
  }

  NaClLog(4, "NaClHostDescMap: mmap returned %"NACL_PRIxPTR"\n",
          (uintptr_t) map_addr);

  if (MAP_FAILED == map_addr) {
    NaClLog(LOG_INFO,
            ("NaClHostDescMap: "
             "mmap(0x%08"NACL_PRIxPTR", 0x%"NACL_PRIxS", "
             "0x%x, 0x%x, 0x%d, 0x%"NACL_PRIx64")"
             " failed, errno %d.\n"),
            (uintptr_t) start_addr, len, host_prot, host_flags, desc,
            (int64_t) offset,
            errno);
    return -NaClXlateErrno(errno);
  }
  if (0 != (flags & NACL_ABI_MAP_FIXED) && map_addr != start_addr) {
    NaClLog(LOG_FATAL,
            ("NaClHostDescMap: mmap with MAP_FIXED not fixed:"
             " returned 0x%08"NACL_PRIxPTR" instead of 0x%08"NACL_PRIxPTR"\n"),
            (uintptr_t) map_addr,
            (uintptr_t) start_addr);
  }
  NaClLog(4, "NaClHostDescMap: returning 0x%08"NACL_PRIxPTR"\n",
          (uintptr_t) map_addr);

  return (uintptr_t) map_addr;
}
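
The two-step pattern above (mmap without PROT_EXEC, then mprotect to add it) generalizes to any filesystem mounted noexec. A minimal sketch outside NaCl (the function name is illustrative); note that NaClHostDescMap maps over existing memory and therefore must treat mprotect failure as fatal, whereas this version passes a NULL hint and can simply back out:

#include <sys/mman.h>

/* Sketch: map a file region, adding PROT_EXEC with a follow-up
 * mprotect instead of requesting it in the initial mmap. */
static void *map_maybe_exec(int fd, size_t len, int prot, int flags,
                            off_t offset) {
  void *p = mmap(NULL, len, prot & ~PROT_EXEC, flags, fd, offset);

  if (p != MAP_FAILED && (prot & PROT_EXEC) != 0 &&
      mprotect(p, len, prot) != 0) {
    munmap(p, len);
    return MAP_FAILED;
  }
  return p;
}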
Exemplo n.º 24
0
int main(int _argc, char **_argv)
{
	sigset_t set;
	int i, sig, goto_daemon = 0, len;
	pid_t pid = 0;
	struct sigaction sa;
	int pagesize = sysconf(_SC_PAGE_SIZE);
	int internal = 0;

	argc = _argc;
	argv = _argv;

	if (argc < 2)
		goto usage;

	for(i = 1; i < argc; i++) {
		if (!strcmp(argv[i], "-d"))
			goto_daemon = 1;
		else if (!strcmp(argv[i], "-p")) {
			if (i == argc - 1)
				goto usage;
			pid_file = argv[++i];
		} else if (!strcmp(argv[i], "-c")) {
			if (i == argc - 1)
				goto usage;
			conf_file = argv[++i];
		} else if (!strcmp(argv[i], "--dump")) {
			if (i == argc - 1)
				goto usage;
			len = (strlen(argv[i + 1]) / pagesize + 1) * pagesize;
			conf_dump = memalign(pagesize, len);
			strcpy(conf_dump, argv[++i]);
			mprotect(conf_dump, len, PROT_READ);
		} else if (!strcmp(argv[i], "--internal"))
			internal = 1;
	}

	if (!conf_file)
		goto usage;

	if (internal) {
		while (getppid() != 1)
			sleep(1);
	}

	if (triton_init(conf_file))
		_exit(EXIT_FAILURE);

	if (goto_daemon && pid != getpid()) {
		/*pid_t pid = fork();
		if (pid > 0)
			_exit(EXIT_SUCCESS);
		if (pid < 0) {
			perror("fork");
			return EXIT_FAILURE;
		}
		if (setsid() < 0)
			_exit(EXIT_FAILURE);
		pid = fork();
		if (pid)
			_exit(0);
		umask(0);
		chdir("/");
		close(STDIN_FILENO);
		close(STDOUT_FILENO);
		close(STDERR_FILENO);*/
		daemon(0, 0);
	}

	if (pid_file) {
		FILE *f = fopen(pid_file, "w");
		if (f) {
			fprintf(f, "%i", getpid());
			fclose(f);
		}
	}

	change_limits();

#ifdef CRYPTO_OPENSSL
	openssl_init();
#endif

	if (triton_load_modules("modules"))
		return EXIT_FAILURE;

	log_msg("accel-ppp version %s\n", ACCEL_PPP_VERSION);

	triton_run();


	sigfillset(&set);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = config_reload;
	sa.sa_mask = set;
	sigaction(SIGUSR1, &sa, NULL);

	sa.sa_handler = sigsegv;
	sa.sa_mask = set;
	sigaction(SIGSEGV, &sa, NULL);


	sigdelset(&set, SIGKILL);
	sigdelset(&set, SIGSTOP);
	sigdelset(&set, SIGSEGV);
	sigdelset(&set, SIGFPE);
	sigdelset(&set, SIGILL);
	sigdelset(&set, SIGBUS);
	sigdelset(&set, SIGHUP);
	sigdelset(&set, SIGIO);
	sigdelset(&set, SIGINT);
	sigdelset(&set, SIGUSR1);
	sigdelset(&set, 35);
	sigdelset(&set, 36);
	pthread_sigmask(SIG_SETMASK, &set, &orig_set);

	sigemptyset(&set);
	//sigaddset(&set, SIGINT);
	sigaddset(&set, SIGTERM);
	sigaddset(&set, SIGSEGV);
	sigaddset(&set, SIGILL);
	sigaddset(&set, SIGFPE);
	sigaddset(&set, SIGBUS);

#ifdef USE_BACKUP
	backup_restore(internal);
#endif

	sigwait(&set, &sig);
	log_info1("terminate, sig = %i\n", sig);

	ap_shutdown_soft(shutdown_cb, 1);

	pthread_mutex_lock(&lock);
	while (!term)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	triton_terminate();

	if (restart != -1)
		__core_restart(restart);

	if (pid_file)
		unlink(pid_file);

	return EXIT_SUCCESS;

usage:
	printf("usage: accel-pppd [-d] [-p <file>] -c <file>\n\
	where:\n\
		-d - daemon mode\n\
		-p - write pid to <file>\n\
		-c - config file\n");
	_exit(EXIT_FAILURE);
}
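
The --dump branch above relies on the buffer being page-aligned and a whole number of pages long, since mprotect works at page granularity. The same trick with the standard posix_memalign (a sketch; the code above uses the older memalign):

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

/* Sketch: duplicate a string into page-aligned storage and make the
 * covering pages read-only, as main() does for conf_dump. The result
 * can no longer be written to (or safely freed) afterwards. */
static char *strdup_readonly(const char *s)
{
	long page = sysconf(_SC_PAGE_SIZE);
	size_t len = (strlen(s) / page + 1) * page;
	void *p;

	if (posix_memalign(&p, page, len))
		return NULL;
	strcpy(p, s);
	if (mprotect(p, len, PROT_READ)) {
		free(p);	/* still writable here, so freeing is safe */
		return NULL;
	}
	return (char *)p;
}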
Exemplo n.º 25
0
int main(int argc, char **argv)
{
	unsigned char *zero_mem, *test1_mem, *test2_mem;
	struct sched_param param = { .sched_priority = 1 };
	struct timespec zero = { .tv_sec = 0, .tv_nsec = 0 };
	struct sigaction sa;

	zero_mem = check_mmap(mmap(0, MEMSIZE, PROT_READ,
			      MAP_PRIVATE | MAP_ANONYMOUS, 0, 0));
	test1_mem = check_mmap(mmap(0, MEMSIZE, PROT_READ,
				    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0));

	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = sigdebug_handler;
	sa.sa_flags = SA_SIGINFO;
	check_unix(sigaction(SIGDEBUG, &sa, NULL));

	check_unix(mlockall(MCL_CURRENT | MCL_FUTURE));

	check_pthread(pthread_setschedparam(pthread_self(), SCHED_FIFO, &param));

	printf("memory read\n");
	check_value("read mem", test1_mem[0], 0);

	pthread_set_mode_np(PTHREAD_WARNSW, 0);
	test2_mem = check_mmap(mmap(0, MEMSIZE, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0));
	check_unix(mprotect(test2_mem, MEMSIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC));

	nanosleep(&zero, NULL);
	pthread_set_mode_np(0, PTHREAD_WARNSW);

	printf("memory write after exec enable\n");
	test2_mem[0] = 0xff;

	pthread_set_mode_np(PTHREAD_WARNSW, 0);
	check_unix(mprotect(test1_mem, MEMSIZE, PROT_READ | PROT_WRITE));

	nanosleep(&zero, NULL);
	pthread_set_mode_np(0, PTHREAD_WARNSW);

	printf("memory write after write enable\n");
	test1_mem[0] = 0xff;
	check_value("read zero", zero_mem[0], 0);

	pthread_set_mode_np(PTHREAD_WARNSW, 0);

	test1_mem = check_mmap(mmap(0, MEMSIZE, PROT_NONE,
				    MAP_PRIVATE | MAP_ANONYMOUS, 0, 0));
	check_unix(mprotect(test1_mem, MEMSIZE, PROT_READ | PROT_WRITE));

	printf("memory read/write after access enable\n");
	check_value("read mem", test1_mem[0], 0);
	test1_mem[0] = 0xff;
	check_value("read zero", zero_mem[0], 0);

	fprintf(stderr, "Test OK\n");

	return 0;
}
Exemplo n.º 26
0
static int
generate_rowbytes(int src_w, int dst_w, int bpp)
{
    static struct
    {
        int bpp;
        int src_w;
        int dst_w;
        int status;
    } last;

    int i;
    int pos, inc;
    unsigned char *eip, *fence;
    unsigned char load, store;

    /* See if we need to regenerate the copy buffer */
    if ((src_w == last.src_w) && (dst_w == last.dst_w) && (bpp == last.bpp)) {
        return (last.status);
    }
    last.bpp = bpp;
    last.src_w = src_w;
    last.dst_w = dst_w;
    last.status = -1;

    switch (bpp) {
    case 1:
        load = LOAD_BYTE;
        store = STORE_BYTE;
        break;
    case 2:
    case 4:
        load = LOAD_WORD;
        store = STORE_WORD;
        break;
    default:
        return SDL_SetError("ASM stretch of %d bytes isn't supported\n", bpp);
    }
#ifdef HAVE_MPROTECT
    /* Make the code writeable */
    if (mprotect(copy_row, sizeof(copy_row), PROT_READ | PROT_WRITE) < 0) {
        return SDL_SetError("Couldn't make copy buffer writeable");
    }
#endif
    pos = 0x10000;
    inc = (src_w << 16) / dst_w;
    eip = copy_row;
    fence = copy_row + sizeof(copy_row)-2;
    for (i = 0; i < dst_w; ++i) {
        while (pos >= 0x10000L) {
            if (eip == fence) {
                return -1;
            }
            if (bpp == 2) {
                *eip++ = PREFIX16;
            }
            *eip++ = load;
            pos -= 0x10000L;
        }
        if (eip == fence) {
            return -1;
        }
        if (bpp == 2) {
            *eip++ = PREFIX16;
        }
        *eip++ = store;
        pos += inc;
    }
    *eip++ = RETURN;

#ifdef HAVE_MPROTECT
    /* Make the code executable but not writeable */
    if (mprotect(copy_row, sizeof(copy_row), PROT_READ | PROT_EXEC) < 0) {
        return SDL_SetError("Couldn't make copy buffer executable");
    }
#endif
    last.status = 0;
    return (0);
}
Exemplo n.º 27
0
static void
heap_space_unprotect(struct heap_space *heap)
{
	mprotect(heap->base, heap->limit - heap->base, PROT_READ | PROT_WRITE);
}
Exemplo n.º 28
0
int         kHlpSys_mprotect(void *addr, KSIZE len, int prot)
{
    if (!mprotect(addr, len, prot))
        return 0;
    return -errno;
}
Exemplo n.º 29
0
/**
 * Protects all the strings in the sandbox's parameter list configuration. It
 * works by calculating the total amount of memory required by the parameter
 * list, allocating the memory using mmap, and protecting it from writes with
 * mprotect().
 */
static int
prot_strings(scmp_filter_ctx ctx, sandbox_cfg_t* cfg)
{
  int ret = 0;
  size_t pr_mem_size = 0, pr_mem_left = 0;
  char *pr_mem_next = NULL, *pr_mem_base;
  sandbox_cfg_t *el = NULL;

  // get total number of bytes required to mmap
  for (el = cfg; el != NULL; el = el->next) {
    pr_mem_size += strlen((char*) ((smp_param_t*)el->param)->value) + 1;
  }

  // allocate protected memory with MALLOC_MP_LIM canary
  pr_mem_base = (char*) mmap(NULL, MALLOC_MP_LIM + pr_mem_size,
      PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (pr_mem_base == MAP_FAILED) {
    log_err(LD_BUG,"(Sandbox) failed allocate protected memory! mmap: %s",
        strerror(errno));
    ret = -1;
    goto out;
  }

  pr_mem_next = pr_mem_base + MALLOC_MP_LIM;
  pr_mem_left = pr_mem_size;

  // change el value pointer to protected
  for (el = cfg; el != NULL; el = el->next) {
    char *param_val = (char*)((smp_param_t *)el->param)->value;
    size_t param_size = strlen(param_val) + 1;

    if (pr_mem_left >= param_size) {
      // copy to protected
      memcpy(pr_mem_next, param_val, param_size);

      // re-point el parameter to protected
      {
        void *old_val = (void *) ((smp_param_t*)el->param)->value;
        tor_free(old_val);
      }
      ((smp_param_t*)el->param)->value = (intptr_t) pr_mem_next;
      ((smp_param_t*)el->param)->prot = 1;

      // move next available protected memory
      pr_mem_next += param_size;
      pr_mem_left -= param_size;
    } else {
      log_err(LD_BUG,"(Sandbox) insufficient protected memory!");
      ret = -2;
      goto out;
    }
  }

  // protecting from writes
  if (mprotect(pr_mem_base, MALLOC_MP_LIM + pr_mem_size, PROT_READ)) {
    log_err(LD_BUG,"(Sandbox) failed to protect memory! mprotect: %s",
        strerror(errno));
    ret = -3;
    goto out;
  }

  /*
   * Setting sandbox restrictions so the string memory cannot be tampered with
   */
  // no mremap of the protected base address
  ret = seccomp_rule_add(ctx, SCMP_ACT_KILL, SCMP_SYS(mremap), 1,
      SCMP_CMP(0, SCMP_CMP_EQ, (intptr_t) pr_mem_base));
  if (ret) {
    log_err(LD_BUG,"(Sandbox) mremap protected memory filter fail!");
    return ret;
  }

  // no munmap of the protected base address
  ret = seccomp_rule_add(ctx, SCMP_ACT_KILL, SCMP_SYS(munmap), 1,
        SCMP_CMP(0, SCMP_CMP_EQ, (intptr_t) pr_mem_base));
  if (ret) {
    log_err(LD_BUG,"(Sandbox) munmap protected memory filter fail!");
    return ret;
  }

  /*
   * Allow mprotect with PROT_READ|PROT_WRITE because openssl uses it, but
   * never over the memory region used by the protected strings.
   *
   * PROT_READ|PROT_WRITE was originally fully allowed in sb_mprotect(), but
   * had to be removed due to limitation of libseccomp regarding intervals.
   *
   * There is a restriction on how much you can mprotect with R|W up to the
   * size of the canary.
   */
  ret = seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(mprotect), 2,
      SCMP_CMP(0, SCMP_CMP_LT, (intptr_t) pr_mem_base),
      SCMP_CMP(1, SCMP_CMP_LE, MALLOC_MP_LIM),
      SCMP_CMP(2, SCMP_CMP_EQ, PROT_READ|PROT_WRITE));
  if (ret) {
    log_err(LD_BUG,"(Sandbox) mprotect protected memory filter fail (LT)!");
    return ret;
  }

  ret = seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(mprotect), 2,
      SCMP_CMP(0, SCMP_CMP_GT, (intptr_t) pr_mem_base + pr_mem_size +
          MALLOC_MP_LIM),
      SCMP_CMP(1, SCMP_CMP_LE, MALLOC_MP_LIM),
      SCMP_CMP(2, SCMP_CMP_EQ, PROT_READ|PROT_WRITE));
  if (ret) {
    log_err(LD_BUG,"(Sandbox) mprotect protected memory filter fail (GT)!");
    return ret;
  }

 out:
   return ret;
}
Exemplo n.º 30
0
static SCM
load_thunk_from_memory (char *data, size_t len, int is_read_only)
#define FUNC_NAME "load-thunk-from-memory"
{
  Elf_Ehdr *header;
  Elf_Phdr *ph;
  const char *err_msg = 0;
  size_t n, alignment = 8;
  int i;
  int dynamic_segment = -1;
  SCM init = SCM_BOOL_F, entry = SCM_BOOL_F;
  char *frame_maps = 0;

  if (len < sizeof *header)
    ABORT ("object file too small");

  header = (Elf_Ehdr*) data;
  
  if ((err_msg = check_elf_header (header)))
    goto cleanup;

  if (header->e_phnum == 0)
    ABORT ("no loadable segments");
  n = header->e_phnum;

  if (len < header->e_phoff + n * sizeof (Elf_Phdr))
    ABORT ("object file too small");

  ph = (Elf_Phdr*) (data + header->e_phoff);

  /* Check that the segment table is sane.  */
  for (i = 0; i < n; i++)
    {
      if (ph[i].p_filesz != ph[i].p_memsz)
        ABORT ("expected p_filesz == p_memsz");

      if (!ph[i].p_flags)
        ABORT ("expected nonzero segment flags");

      if (ph[i].p_align < alignment)
        {
          if (ph[i].p_align % alignment)
            ABORT ("expected new alignment to be multiple of old");
          alignment = ph[i].p_align;
        }

      if (ph[i].p_type == PT_DYNAMIC)
        {
          if (dynamic_segment >= 0)
            ABORT ("expected only one PT_DYNAMIC segment");
          dynamic_segment = i;
          continue;
        }

      if (ph[i].p_type != PT_LOAD)
        ABORT ("unknown segment type");

      if (i == 0)
        {
          if (ph[i].p_vaddr != 0)
            ABORT ("first loadable vaddr is not 0");
        }
      else
        {
          if (ph[i].p_vaddr < ph[i-1].p_vaddr + ph[i-1].p_memsz)
            ABORT ("overlapping segments");

          if (ph[i].p_offset + ph[i].p_filesz > len)
            ABORT ("segment beyond end of byte array");
        }
    }

  if (dynamic_segment < 0)
    ABORT ("no PT_DYNAMIC segment");

  if (!IS_ALIGNED ((scm_t_uintptr) data, alignment))
    ABORT ("incorrectly aligned base");

  /* Allow writes to writable pages.  */
  if (is_read_only)
    {
#ifdef HAVE_SYS_MMAN_H
      for (i = 0; i < n; i++)
        {
          if (ph[i].p_type != PT_LOAD)
            continue;
          if (ph[i].p_flags == PF_R)
            continue;
          if (ph[i].p_align != 4096)
            continue;

          if (mprotect (data + ph[i].p_vaddr,
                        ph[i].p_memsz,
                        segment_flags_to_prot (ph[i].p_flags)))
            goto cleanup;
        }
#else
      ABORT ("expected writable pages");
#endif
    }

  if ((err_msg = process_dynamic_segment (data, &ph[dynamic_segment],
                                          &init, &entry, &frame_maps)))
    goto cleanup;

  if (scm_is_true (init))
    scm_call_0 (init);

  register_elf (data, len, frame_maps);

  /* Finally!  Return the thunk.  */
  return entry;

 cleanup:
  {
    if (errno)
      SCM_SYSERROR;
    scm_misc_error (FUNC_NAME, err_msg ? err_msg : "error loading ELF file",
                    SCM_EOL);
  }
}
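
segment_flags_to_prot is defined elsewhere in Guile's loader; the conventional PF_* to PROT_* translation it presumably performs looks like this (a sketch under that assumption):

#include <elf.h>
#include <sys/mman.h>

/* Sketch: map ELF segment flags (PF_R/PF_W/PF_X) onto mmap protection
   bits, the usual shape of segment_flags_to_prot.  */
static int
segment_flags_to_prot_sketch (int flags)
{
  int prot = PROT_NONE;

  if (flags & PF_R)
    prot |= PROT_READ;
  if (flags & PF_W)
    prot |= PROT_WRITE;
  if (flags & PF_X)
    prot |= PROT_EXEC;
  return prot;
}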