Example #1
static int create_shm(void)
{
	shm = alloc_shared(sizeof(struct shm_s));
	if (shm == NULL) {
		perror("mmap");
		return -1;
	}

	memset(shm, 0, sizeof(struct shm_s));

	shm->total_syscalls_done = 1;
	shm->regenerate = 0;

	if (user_specified_children != 0)
		shm->max_children = user_specified_children;
	else
		shm->max_children = sysconf(_SC_NPROCESSORS_ONLN);

	if (shm->max_children > MAX_NR_CHILDREN) {
		printf("Increase MAX_NR_CHILDREN!\n");
		exit(EXIT_FAILURE);
	}
	memset(shm->pids, EMPTY_PIDSLOT, sizeof(shm->pids));

	shm->parentpid = getpid();

	shm->seed = init_seed(seed);

	return 0;
}
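None of the trinity-style examples in this list show alloc_shared() itself; judging from the perror("mmap") on failure, it behaves like a thin wrapper around an anonymous MAP_SHARED mapping. Below is a minimal sketch under that assumption, not the project's actual implementation (the OCaml runtime examples further down, #11 and #12, use an unrelated two-argument alloc_shared that allocates a block on the shared heap).

#include <sys/mman.h>

/* Sketch only: an mmap-backed alloc_shared() as the callers above appear to
 * assume (they check for NULL and then perror("mmap")). */
static void *alloc_shared(size_t size)
{
	void *p;

	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;

	return p;
}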
Example #2
void init_shm(void)
{
	unsigned int i;
	unsigned int childptrslen;

	output(2, "shm is at %p\n", shm);

	if (set_debug == TRUE)
		shm->debug = TRUE;

	shm->stats.total_syscalls_done = 1;

	if (user_set_seed == TRUE)
		shm->seed = init_seed(seed);
	else
		shm->seed = new_seed();
	/* Set seed in parent thread */
	set_seed(NULL);

	childptrslen = max_children * sizeof(struct childdata *);
	/* round up to page size */
	childptrslen += page_size - 1;
	childptrslen &= PAGE_MASK;

	shm->children = memalign(page_size, childptrslen);
	if (shm->children == NULL) {
		printf("Failed to allocate child structures.\n");
		exit(EXIT_FAILURE);
	}

	memset(shm->children, 0, childptrslen);

	/* We allocate the childdata structs as shared mappings, because
	 * the watchdog process needs to peek into each child's syscall records
	 * to make sure they are making progress.
	 */
	for_each_child(i) {
		struct childdata *child;

		child = alloc_shared(sizeof(struct childdata));
		shm->children[i] = child;

		memset(&child->syscall, 0, sizeof(struct syscallrecord));

		child->pid = EMPTY_PIDSLOT;

		child->num = i;

		init_child_logging(child);
	}
	/* The array of child pointers is read-only from here on; the per-child
	 * structs it points to remain writable shared mappings. */
	mprotect(shm->children, childptrslen, PROT_READ);
}
Example #3
void create_shm(void)
{
	unsigned int nr_shm_pages;

	/* round up shm to nearest page size */
	shm_size = (sizeof(struct shm_s) + page_size - 1) & PAGE_MASK;
	nr_shm_pages = shm_size / page_size;

	/* Waste some address space to set up some "protection" near the SHM location. */
	shm = alloc_shared(shm_size);

	/* clear the whole shm. */
	memset(shm, 0, shm_size);
	printf("shm:%p-%p (%u pages)\n", shm, shm + shm_size - 1, nr_shm_pages);
}
Example #4
void benchmark_once_thread(struct thrarg *thrarg, unsigned iters)
{
	const int new_thread = 0;
	size_t i;
	unsigned nthreads = thrarg->params.threads;
	struct thrarg *thrargs;
	pthread_t threads[nthreads];
	pthread_attr_t attr;
	cpu_set_t c;
	const bool affinity = true;

	thrarg->params.iters = iters;

	if (!shared)
		shared = alloc_shared(sizeof(struct bench_shared));

	thrargs = shared->thrargs;
	shared->barrier = nthreads;

	pthread_attr_init(&attr);

	for (i=0; i < nthreads; i++) {
		thrargs[i] = *thrarg;
		thrargs[i].params.id = i;
	}

	i = (new_thread) ? 0 : 1;
	for (; i < nthreads; i++) {
		if (affinity) {
			CPU_ZERO(&c);
			CPU_SET(i, &c);
			pthread_attr_setaffinity_np(&attr, sizeof(c), &c);
		}
		pthread_create(&threads[i], &attr, thread, (void *)i);
	}

	if (!new_thread)
		thread((void *)(size_t)0);

	i = (new_thread) ? 0 : 1;
	for (; i < nthreads; i++)
		pthread_join(threads[i], NULL);

	thrarg->result.avg = thrargs[0].result.avg;
	thrarg->result.sum = thrargs[0].result.sum;
}
Example #5
/*
 * This changes the pointers in the table 'from' to be copies in
 * shared mmaps across all children.  We do this so that a child can
 * modify the flags field (adding AVOID, for example) and have other processes see the change.
 */
static struct syscalltable * copy_syscall_table(struct syscalltable *from, unsigned int nr)
{
	unsigned int n;
	struct syscallentry *copy;

	copy = alloc_shared(nr * sizeof(struct syscallentry));
	if (copy == NULL)
		exit(EXIT_FAILURE);

	for (n = 0; n < nr; n++) {
		memcpy(copy + n, from[n].entry, sizeof(struct syscallentry));
		copy[n].number = n;
		copy[n].active_number = 0;
		from[n].entry = &copy[n];
	}
	return from;
}
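The copy loop above only reveals a few things about the two structs involved: each syscalltable slot holds an entry pointer, and the first fields touched on a syscallentry are number, active_number and the flags word that children later modify. A rough sketch of those assumed shapes, for illustration only (the real headers carry many more fields):

/* Assumed layout, inferred from the copy loop above; not the real headers. */
struct syscallentry {
	unsigned int number;		/* index into the table, set during the copy */
	unsigned int active_number;	/* cleared for the shared copy */
	unsigned int flags;		/* e.g. AVOID, toggled by child processes */
	/* ... name, argument descriptors, etc. in the real definition ... */
};

struct syscalltable {
	struct syscallentry *entry;	/* rewritten to point into the shared array */
};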
Example #6
void pids_init(void)
{
	unsigned int i;

	if (read_pid_max()) {
#ifdef __x86_64__
		pidmax = 4194304;
#else
		pidmax = 32768;
#endif
		outputerr("Couldn't read pid_max from proc\n");
	}

	output(0, "Using pid_max = %d\n", pidmax);

	pids = alloc_shared(max_children * sizeof(int));
	for_each_child(i)
		pids[i] = EMPTY_PIDSLOT;
}
Example #7
static int create_shm(void)
{
	void *p;
	unsigned int shm_pages;

	shm_pages = ((sizeof(struct shm_s) + page_size - 1) & ~(page_size - 1)) / page_size;

	/* Waste some address space to set up some "protection" near the SHM location. */
	p = alloc_shared((SHM_PROT_PAGES + shm_pages + SHM_PROT_PAGES) * page_size);
	if (p == NULL) {
		perror("mmap");
		return -1;
	}

	mprotect(p, SHM_PROT_PAGES * page_size, PROT_NONE);
	mprotect(p + (SHM_PROT_PAGES + shm_pages) * page_size,
			SHM_PROT_PAGES * page_size, PROT_NONE);

	shm = p + SHM_PROT_PAGES * page_size;

	memset(shm, 0, sizeof(struct shm_s));

	shm->total_syscalls_done = 1;
	shm->regenerate = 0;

	memset(shm->pids, EMPTY_PIDSLOT, sizeof(shm->pids));

	shm->nr_active_syscalls = 0;
	shm->nr_active_32bit_syscalls = 0;
	shm->nr_active_64bit_syscalls = 0;
	memset(shm->active_syscalls, 0, sizeof(shm->active_syscalls));
	memset(shm->active_syscalls32, 0, sizeof(shm->active_syscalls32));
	memset(shm->active_syscalls64, 0, sizeof(shm->active_syscalls64));

	/* Overwritten later in setup_shm_postargs if user passed -s */
	shm->seed = new_seed();

	/* Set seed in parent thread */
	set_seed(0);

	return 0;
}
Example #8
void create_shm(void)
{
	void *p;
	unsigned int shm_pages;

	/* round up shm to nearest page size */
	shm_pages = ((sizeof(struct shm_s) + page_size - 1) & ~(page_size - 1)) / page_size;

	/* Waste some address space to set up some "protection" near the SHM location. */
	p = alloc_shared((SHM_PROT_PAGES + shm_pages + SHM_PROT_PAGES) * page_size);

	/* clear the whole mapping, including the redzones. */
	memset(p, 0, (SHM_PROT_PAGES + shm_pages + SHM_PROT_PAGES) * page_size);

	/* set the redzones to PROT_NONE */
	mprotect(p, SHM_PROT_PAGES * page_size, PROT_NONE);
	mprotect(p + (SHM_PROT_PAGES + shm_pages) * page_size,
			SHM_PROT_PAGES * page_size, PROT_NONE);

	shm = p + SHM_PROT_PAGES * page_size;
}
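Examples #7 and #8 both sandwich the shm block between two PROT_NONE guard areas, so a stray pointer that runs off either end of the shared state faults immediately instead of silently corrupting neighbouring memory. The resulting layout, as set up by the code above:

/*
 * p                                                              p + total size
 * | SHM_PROT_PAGES pages | shm_pages pages       | SHM_PROT_PAGES pages |
 * | PROT_NONE redzone    | struct shm_s (rw)     | PROT_NONE redzone    |
 *                        ^
 *                        shm = p + SHM_PROT_PAGES * page_size
 */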
Example #9
void benchmark_once_fork(struct thrarg *thrarg, unsigned iters)
{
	size_t i;
	unsigned nthreads = thrarg->params.threads;
	struct thrarg *thrargs;
	cpu_set_t c;
	pid_t pids[nthreads];

	if (!shared)
		shared = alloc_shared(sizeof(struct bench_shared));

	thrargs = shared->thrargs;
	shared->barrier = nthreads;

	thrarg->params.iters = iters;

	for (i=0; i < nthreads; i++) {
		thrargs[i] = *thrarg;
		thrargs[i].params.id = i;
	}

	for (i=0; i < nthreads; i++) {
		CPU_ZERO(&c);
		CPU_SET(i, &c);
		pids[i] = fork();
		if (!pids[i]) {
			pthread_setaffinity_np(pthread_self(), sizeof(c), &c);
			thread((void *)i);
			exit(0);
		}
	}

	for (i=0; i < nthreads; i++)
		waitpid(pids[i], NULL, 0);

	thrarg->result.avg = thrargs[0].result.avg;
	thrarg->result.sum = thrargs[0].result.sum;
}
Example #10
/*
 * This changes the pointers in the table 'from' to be copies in
 * shared mmaps across all children.  We do this so that a child can
 * modify the flags field (adding AVOID, for example) and have other processes see the change.
 */
static struct syscalltable * copy_syscall_table(struct syscalltable *from, unsigned int nr)
{
	unsigned int n;
	struct syscall *copy;

	copy = alloc_shared(nr * sizeof(struct syscall));
	if (copy == NULL)
		exit(EXIT_FAILURE);

	for (n = 0; n < nr; n++) {
		if (from[n].entry != NULL) {
			memcpy(copy + n, from[n].entry, sizeof(struct syscall));
			copy[n].number = n;
			copy[n].active_number = 0;
			from[n].entry = &copy[n];
		} else {
			/* Hole in the table: install a zeroed private stub and flag it
			 * for later deactivation. */
			from[n].entry = malloc(sizeof(struct syscall));
			memset(from[n].entry, 0, sizeof(struct syscall));
			from[n].entry->flags = TO_BE_DEACTIVATED;
		}
	}
	return from;
}
Example #11
static void caml_oldify_one (value v, value *p)
{
  value result;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_domain_state->young_ptr);
    hd = Hd_val (v);
    stat_live_bytes += Bhsize_hd(hd);
    if (Is_promoted_hd (hd)) {
      *p = caml_addrmap_lookup(&caml_domain_state->remembered_set->promotion, v);
    } else if (hd == 0){         /* If already forwarded */
      *p = Op_val(v)[0];  /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        value field0;

        sz = Wosize_hd (hd);
        result = alloc_shared (sz, tag);
        *p = result;
        if (tag == Stack_tag) {
          memcpy((void*)result, (void*)v, sizeof(value) * sz);
          Hd_val (v) = 0;
          Op_val(v)[0] = result;
          Op_val(v)[1] = oldify_todo_list;
          oldify_todo_list = v;
        } else {
          field0 = Op_val(v)[0];
          Hd_val (v) = 0;            /* Set forward flag */
          Op_val(v)[0] = result;     /*  and forward pointer. */
          if (sz > 1){
            Op_val (result)[0] = field0;
            Op_val (result)[1] = oldify_todo_list;    /* Add this block */
            oldify_todo_list = v;                    /*  to the "to do" list. */
          }else{
            Assert (sz == 1);
            p = Op_val(result);
            v = field0;
            goto tail_call;
          }
        }
      }else if (tag >= No_scan_tag){
        sz = Wosize_hd (hd);
        result = alloc_shared(sz, tag);
        for (i = 0; i < sz; i++) Op_val (result)[i] = Op_val(v)[i];
        Hd_val (v) = 0;            /* Set forward flag */
        Op_val (v)[0] = result;    /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      } else{
        value f = Forward_val (v);
        tag_t ft = 0;
        int vv = 1;

        Assert (tag == Forward_tag);
        if (Is_block (f)){
          if (Is_young (f)){
            vv = 1;
            ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
          }else{
            vv = 1;
            if (vv){
              ft = Tag_val (f);
            }
          }
        }
        if (!vv || ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = alloc_shared (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;             /* Set (GC) forward flag */
          Op_val (v)[0] = result;      /*  and forward pointer. */
          p = Op_val (result);
          v = f;
          goto tail_call;
        }else{
          v = f;                        /* Follow the forwarding */
          goto tail_call;               /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;
  }
}
Example #12
/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */
static void oldify_one (void* st_v, value v, value *p)
{
  struct oldify_state* st = st_v;
  value result;
  header_t hd;
  mlsize_t sz, i;
  mlsize_t infix_offset;
  tag_t tag;
  caml_domain_state* domain_state =
    st->promote_domain ? st->promote_domain->state : Caml_state;
  char* young_ptr = domain_state->young_ptr;
  char* young_end = domain_state->young_end;
  CAMLassert (domain_state->young_start <= domain_state->young_ptr &&
          domain_state->young_ptr <= domain_state->young_end);

 tail_call:
  if (!(Is_block(v) && is_in_interval((value)Hp_val(v), young_ptr, young_end))) {
    /* not a minor block */
    *p = v;
    return;
  }

  infix_offset = 0;
  do {
    hd = Hd_val (v);
    if (hd == 0) {
      /* already forwarded, forward pointer is first field. */
      *p = Op_val(v)[0] + infix_offset;
      return;
    }
    tag = Tag_hd (hd);
    if (tag == Infix_tag) {
      /* Infix header, retry with the real block */
      CAMLassert (infix_offset == 0);
      infix_offset = Infix_offset_hd (hd);
      CAMLassert(infix_offset > 0);
      v -= infix_offset;
    }
  } while (tag == Infix_tag);

  if (((value)Hp_val(v)) > st->oldest_promoted) {
    st->oldest_promoted = (value)Hp_val(v);
  }

  if (tag == Cont_tag) {
    struct stack_info* stk = Ptr_val(Op_val(v)[0]);
    CAMLassert(Wosize_hd(hd) == 1 && infix_offset == 0);
    result = alloc_shared(1, Cont_tag);
    *p = result;
    Op_val(result)[0] = Val_ptr(stk);
    *Hp_val (v) = 0;
    Op_val(v)[0] = result;
    if (stk != NULL)
      caml_scan_stack(&oldify_one, st, stk);
  } else if (tag < Infix_tag) {
    value field0;
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared (sz, tag);
    *p = result + infix_offset;
    field0 = Op_val(v)[0];
    CAMLassert (!Is_debug_tag(field0));
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val(v)[0] = result;     /*  and forward pointer. */
    if (sz > 1){
      Op_val (result)[0] = field0;
      Op_val (result)[1] = st->todo_list;    /* Add this block */
      st->todo_list = v;                     /*  to the "to do" list. */
    }else{
      CAMLassert (sz == 1);
      p = Op_val(result);
      v = field0;
      goto tail_call;
    }
  } else if (tag >= No_scan_tag) {
    sz = Wosize_hd (hd);
    st->live_bytes += Bhsize_hd(hd);
    result = alloc_shared(sz, tag);
    for (i = 0; i < sz; i++) {
      value curr = Op_val(v)[i];
      Op_val (result)[i] = curr;
    }
    *Hp_val (v) = 0;           /* Set forward flag */
    Op_val (v)[0] = result;    /*  and forward pointer. */
    CAMLassert (infix_offset == 0);
    *p = result;
  } else {
    CAMLassert (tag == Forward_tag);
    CAMLassert (infix_offset == 0);

    value f = Forward_val (v);
    tag_t ft = 0;

    if (Is_block (f)) {
      ft = Tag_val (Hd_val (f) == 0 ? Op_val (f)[0] : f);
    }

    if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag) {
      /* Do not short-circuit the pointer.  Copy as a normal block. */
      CAMLassert (Wosize_hd (hd) == 1);
      st->live_bytes += Bhsize_hd(hd);
      result = alloc_shared (1, Forward_tag);
      *p = result;
      *Hp_val (v) = 0;             /* Set (GC) forward flag */
      Op_val (v)[0] = result;      /*  and forward pointer. */
      p = Op_val (result);
      v = f;
      goto tail_call;
    } else {
      v = f;                        /* Follow the forwarding */
      goto tail_call;               /*  then oldify. */
    }
  }
}
Example #13
void create_shm_arrays(void)
{
	shm->child_syscall_count = alloc_shared(max_children * sizeof(unsigned long));

	shm->pids = alloc_shared(max_children * sizeof(pid_t));

	shm->tv = alloc_shared(max_children * sizeof(struct timeval));

	shm->syscall = alloc_shared(max_children * sizeof(struct syscallrecord));
	shm->previous = alloc_shared(max_children * sizeof(struct syscallrecord));

	shm->mappings = alloc_shared(max_children * sizeof(struct map *));
	shm->num_mappings = alloc_shared(max_children * sizeof(unsigned int));

	shm->seeds = alloc_shared(max_children * sizeof(int));
	shm->kill_count = alloc_shared(max_children * sizeof(unsigned char));
	shm->logfiles = alloc_shared(max_children * sizeof(FILE *));
	shm->scratch = alloc_shared(max_children * sizeof(unsigned long));
}