Example No. 1
int add_at_first(linkedlist _list, void* data)
{
    linkedlist_t* list = (linkedlist_t*) _list;
    node* n, *slider;

    if(list->size == 0)
    {
        return add_at_last(list, data);
    }
    else if(list->size == 1)
    {
        n = node_new(data);
        set_head(list, n);
        set_prev(get_tail(list), n);
        set_next(n, get_tail(list));
        list->size++;
    }
    else // list->size > 1
    {
        slider = get_head(list);
        n = node_new(data);
        set_next(n, slider);
        set_prev(slider, n);
        set_head(list, n);
        list->size++;
    }
    return list->size;
}
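A minimal usage sketch for the linked-list API these examples share: linkedlist_new (Example No. 6 below), add_at_first (above), and linkedlist_remove (Example No. 2 below). The "linkedlist.h" header name and the int payloads are assumptions made for illustration, not part of the listing.

#include <stdio.h>
#include "linkedlist.h"   /* hypothetical header for the API shown in these examples */

int main(void)
{
    int a = 1, b = 2;
    linkedlist list = linkedlist_new();   /* empty list, size 0 */

    add_at_first(list, &b);               /* list: b */
    add_at_first(list, &a);               /* list: a, b */

    /* linkedlist_remove returns the stored data pointer, or NULL if absent */
    int* removed = (int*) linkedlist_remove(list, &a);
    printf("removed %d\n", removed ? *removed : -1);   /* prints: removed 1 */

    return 0;
}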
Example No. 2
void* linkedlist_remove(linkedlist _list, void* data)
{
    linkedlist_t* list = (linkedlist_t*) _list;
    node* n = find_by_data(list, data);

    if(NULL == n)
        return NULL;

    if(is_first(list, n)) // at the first
    {
        if(is_last(list, n)) // only one exists
        {
            set_head(list, NULL);
            set_tail(list, NULL);
        }
        else // more than one node exists
        {
            set_head(list, get_next(n));
            set_prev(get_next(n), NULL); // the new head must not keep a pointer to the node being freed
        }
    }
    else if(is_last(list, n)) // at the last
    {
        set_next(get_prev(n), NULL);
        set_tail(list, get_prev(n));
    }
    else
    {
        set_prev(get_next(n), get_prev(n));
        set_next(get_prev(n), get_next(n));
    }
    list->size--;
    free(n);
    return data;
}
Example No. 3
static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
Example No. 4
template <class Chunk>
void FreeList<Chunk>::getFirstNChunksFromList(size_t n, FreeList<Chunk>* fl) {
  assert_proper_lock_protection();
  assert(fl->count() == 0, "Precondition");
  if (count() > 0) {
    int k = 1;
    fl->set_head(head()); n--;
    Chunk* tl = head();
    while (tl->next() != NULL && n > 0) {
      tl = tl->next(); n--; k++;
    }
    assert(tl != NULL, "Loop Inv.");

    // First, fix up the list we took from.
    Chunk* new_head = tl->next();
    set_head(new_head);
    set_count(count() - k);
    if (new_head == NULL) {
      set_tail(NULL);
    } else {
      new_head->link_prev(NULL);
    }
    // Now we can fix up the tail.
    tl->link_next(NULL);
    // And return the result.
    fl->set_tail(tl);
    fl->set_count(k);
  }
}
Example No. 5
// Dequeue an operation
SolarisAttachOperation* SolarisAttachListener::dequeue() {
  for (;;) {
    int res;

    // wait for somebody to enqueue something
    while ((res = ::sema_wait(wakeup())) == EINTR)
      ;
    if (res) {
      warning("sema_wait failed: %s", os::strerror(res));
      return NULL;
    }

    // lock the list
    res = os::Solaris::mutex_lock(mutex());
    assert(res == 0, "mutex_lock failed");

    // remove the head of the list
    SolarisAttachOperation* op = head();
    if (op != NULL) {
      set_head(op->next());
      if (head() == NULL) {
        set_tail(NULL);
      }
    }

    // unlock
    os::Solaris::mutex_unlock(mutex());

    // if we got an operation then return it.
    if (op != NULL) {
      return op;
    }
  }
}
Example No. 6
// function definition
linkedlist linkedlist_new(void)
{
    int linkedlist_size = sizeof(linkedlist_t);
    linkedlist_t* list = (linkedlist_t*) malloc(linkedlist_size);
    set_head(list, NULL);
    set_tail(list, NULL);
    list->size = 0;
    return (linkedlist) list;
}
Example No. 7
template <class Chunk>
void FreeList<Chunk>::reset() {
  // Don't set the _size to 0 because this method is
  // used with an existing list that has a size but which has
  // been emptied.
  // Don't clear the _protecting_lock of an existing list.
  set_count(0);
  set_head(NULL);
  set_tail(NULL);
}
Example No. 8
template <class Chunk>
void FreeList<Chunk>::link_head(Chunk* v) {
  assert_proper_lock_protection();
  set_head(v);
  // If this method is not used (just set the head instead),
  // this check can be avoided.
  if (v != NULL) {
    v->link_prev(NULL);
  }
}
Example No. 9
static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
	       mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
Example No. 10
// Enqueue an operation. This is called from a native thread that is not attached to VM.
// Also we need to be careful not to execute anything that results in more than a 4k stack.
//
int Win32AttachListener::enqueue(char* cmd, char* arg0, char* arg1, char* arg2, char* pipename) {
  // listener not running
  if (!AttachListener::is_initialized()) {
    return ATTACH_ERROR_DISABLED;
  }

  // check that all parameters to the operation are within length limits
  if (strlen(cmd) > AttachOperation::name_length_max) return ATTACH_ERROR_ILLEGALARG;
  if (strlen(arg0) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
  if (strlen(arg1) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
  if (strlen(arg2) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
  if (strlen(pipename) > Win32AttachOperation::pipe_name_max) return ATTACH_ERROR_ILLEGALARG;

  // check for a well-formed pipename
  if (strstr(pipename, "\\\\.\\pipe\\") != pipename) return ATTACH_ERROR_ILLEGALARG;

  // grab the lock for the list
  DWORD res = ::WaitForSingleObject(mutex(), INFINITE);
  if (res != WAIT_OBJECT_0) {
    return ATTACH_ERROR_INTERNAL;
  }

  // try to get an operation from the available list
  Win32AttachOperation* op = available();
  if (op != NULL) {
    set_available(op->next());

    // add to end (tail) of list
    op->set_next(NULL);
    if (tail() == NULL) {
      set_head(op);
    } else {
      tail()->set_next(op);
    }
    set_tail(op);

    op->set_name(cmd);
    op->set_arg(0, arg0);
    op->set_arg(1, arg1);
    op->set_arg(2, arg2);
    op->set_pipe(pipename);

    // Increment number of enqueued operations.
    // Side effect: Semaphore will be signaled and will release
    // any blocking waiters (i.e. the AttachListener thread).
    BOOL not_exceeding_semaphore_maximum_count =
      ::ReleaseSemaphore(enqueued_ops_semaphore(), 1, NULL);
    guarantee(not_exceeding_semaphore_maximum_count, "invariant");
  }
  ::ReleaseMutex(mutex());

  return (op != NULL) ? 0 : ATTACH_ERROR_RESOURCE;
}
Example No. 11
void Basic_block::apply_scheduling(list <Node_dfg*> *new_order){
   list <Node_dfg*>::iterator it=new_order->begin();
   if(it == new_order->end())
      return;   // nothing to apply
   Instruction *inst=(*it)->get_instruction();
   Line *n=_head, *prevn=NULL;
   Line *end_next = _end->get_next();
   if(!n){
      cout<<"wrong bb : cannot apply"<<endl;
      return;
   }
   
   while(!n->isInst()){
     prevn=n;
     n=n->get_next();
     if(n==_end){
       cout<<"wrong bb : cannot apply"<<endl;
       return;
     }
   }
   
   // there are instructions; we can't be sure this is the right BB, but assume it is
   inst->set_index(0);
   inst->set_prev(NULL);
   _firstInst = inst;
   n = inst;
   
   if(prevn){
     prevn->set_next(n);
     n->set_prev(prevn);
   }
   else{
     set_head(n);
   }

   int i;
   it++;
   for(i=1; it!=new_order->end(); it++, i++){

     inst->set_link_succ_pred((*it)->get_instruction());
     
     inst=(*it)->get_instruction();
     inst->set_index(i);
     prevn = n;
     n = inst;
     prevn->set_next(n);
     n->set_prev(prevn);
   }
   inst->set_next(NULL);
   _lastInst = inst;
   set_end(n);
   n->set_next(end_next);
   return;
}
Example No. 12
/* ------------------------- __malloc_trim -------------------------
   __malloc_trim is an inverse of sorts to __malloc_alloc.  It gives memory
   back to the system (via negative arguments to sbrk) if there is unused
   memory at the `high' end of the malloc pool. It is called automatically by
   free() when top space exceeds the trim threshold. It is also called by the
   public malloc_trim routine.  It returns 1 if it actually released any
   memory, else 0.
*/
static int __malloc_trim(size_t pad, mstate av)
{
    long  top_size;        /* Amount of top-most memory */
    long  extra;           /* Amount to release */
    long  released;        /* Amount actually released */
    char* current_brk;     /* address returned by pre-check sbrk call */
    char* new_brk;         /* address returned by post-check sbrk call */
    size_t pagesz;

    pagesz = av->pagesize;
    top_size = chunksize(av->top);

    /* Release in pagesize units, keeping at least one page */
    extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

    if (extra > 0) {

	/*
	   Only proceed if end of memory is where we last set it.
	   This avoids problems if there were foreign sbrk calls.
	   */
	current_brk = (char*)(MORECORE(0));
	if (current_brk == (char*)(av->top) + top_size) {

	    /*
	       Attempt to release memory. We ignore MORECORE return value,
	       and instead call again to find out where new end of memory is.
	       This avoids problems if first call releases less than we asked,
	   or if failure somehow altered brk value. (We could still
	       encounter problems if it altered brk in some very bad way,
	       but the only thing we can do is adjust anyway, which will cause
	       some downstream failure.)
	       */

	    MORECORE(-extra);
	    new_brk = (char*)(MORECORE(0));

	    if (new_brk != (char*)MORECORE_FAILURE) {
		released = (long)(current_brk - new_brk);

		if (released != 0) {
		    /* Success. Adjust top. */
		    av->sbrked_mem -= released;
		    set_head(av->top, (top_size - released) | PREV_INUSE);
		    check_malloc_state();
		    return 1;
		}
	    }
	}
    }
    return 0;
}
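The public entry point that reaches this internal routine is malloc_trim(3), a glibc/uclibc extension declared in <malloc.h>. A minimal sketch of triggering a trim by hand; the 64 KiB allocation size is an arbitrary choice for illustration.

#include <malloc.h>   /* malloc_trim (glibc extension) */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* Allocate and free a block so unused space accumulates at the top of the heap. */
    void* p = malloc(64 * 1024);
    free(p);

    /* Ask the allocator to give unused top-of-heap pages back to the system;
       returns 1 if any memory was released, else 0. */
    int released = malloc_trim(0);
    printf("released: %d\n", released);
    return 0;
}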
Example No. 13
int set_intersection(Set *seti, const Set *set1, const Set *set2) {
  SetElm *member;
  set_init(seti, set1->match, NULL);
  for (member=set_head(set1); member != NULL; member = set_next(member)) {
    if (set_is_member(set2, set_data(member))) {
      if (set_insert(seti, set_data(member)) != 0) {
        set_destroy(seti);
        return -1;
      }
    }
  }
  return 0;
}
Example No. 14
int set_is_equal (const Set *set1, const Set *set2) {
  SetElm *member;

  if (set_size(set1) != set_size(set2))
    return 0;
  
  for (member = set_head(set1); member != NULL; member = set_next(member)) {
    if (!set_is_member(set2, set_data(member)))
      return 0;
  }

  return 1;
}
Example No. 15
int set_union (Set *setu, const Set *set1, const Set *set2) {
  SetElm *member;
  set_init(setu, set1->match, NULL);

  for (member=set_head(set1); member != NULL; member = set_next(member)) {
    if (set_insert(setu, set_data(member)) != 0) {
      set_destroy(setu);
      return -1;
    }
  }

  for (member=set_head(set2); member != NULL; member = set_next(member)) {
    switch (set_insert(setu, set_data(member))) {
      case 0:
      case 1:
        break;
      case -1:
        set_destroy(setu);
        return -1;
        break;
    }
  }
  return 0;
}
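A short usage sketch for these set routines, assuming the list-backed Set API they belong to (set_init, set_insert, set_size, set_destroy). The "set.h" header and the match_int comparator are hypothetical pieces written for this sketch.

#include <stdio.h>
#include "set.h"   /* hypothetical header for the Set type used above */

/* Comparator: returns 1 when the two int keys are equal. */
static int match_int(const void *key1, const void *key2)
{
    return *(const int *) key1 == *(const int *) key2;
}

int main(void)
{
    Set s1, s2, u, i;
    int a = 1, b = 2, c = 3;

    set_init(&s1, match_int, NULL);
    set_init(&s2, match_int, NULL);
    set_insert(&s1, &a);  set_insert(&s1, &b);   /* s1 = {1, 2} */
    set_insert(&s2, &b);  set_insert(&s2, &c);   /* s2 = {2, 3} */

    if (set_union(&u, &s1, &s2) == 0)
        printf("union size: %d\n", set_size(&u));          /* 3 */
    if (set_intersection(&i, &s1, &s2) == 0)
        printf("intersection size: %d\n", set_size(&i));   /* 1 */

    set_destroy(&u);
    set_destroy(&i);
    set_destroy(&s1);
    set_destroy(&s2);
    return 0;
}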
Example No. 16
// Enqueue an operation. This is called from a native thread that is not attached to VM.
// Also we need to be careful not to execute anything that results in more than a 4k stack.
//
int Win32AttachListener::enqueue(char* cmd, char* arg0, char* arg1, char* arg2, char* pipename) {
  // listener not running
  if (!AttachListener::is_initialized()) {
    return ATTACH_ERROR_DISABLED;
  }

  // check that all parameters to the operation are within length limits
  if (strlen(cmd) > AttachOperation::name_length_max) return ATTACH_ERROR_ILLEGALARG;
  if (strlen(arg0) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
  if (strlen(arg1) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
  if (strlen(arg2) > AttachOperation::arg_length_max) return ATTACH_ERROR_ILLEGALARG;
  if (strlen(pipename) > Win32AttachOperation::pipe_name_max) return ATTACH_ERROR_ILLEGALARG;

  // check for a well-formed pipename
  if (strstr(pipename, "\\\\.\\pipe\\") != pipename) return ATTACH_ERROR_ILLEGALARG;

  // grab the lock for the list
  DWORD res = ::WaitForSingleObject(mutex(), INFINITE);
  if (res != WAIT_OBJECT_0) {
    return ATTACH_ERROR_INTERNAL;
  }

  // try to get an operation from the available list
  Win32AttachOperation* op = available();
  if (op != NULL) {
    set_available(op->next());

    // add to end (tail) of list
    op->set_next(NULL);
    if (tail() == NULL) {
      set_head(op);
    } else {
      tail()->set_next(op);
    }
    set_tail(op);

    op->set_name(cmd);
    op->set_arg(0, arg0);
    op->set_arg(1, arg1);
    op->set_arg(2, arg2);
    op->set_pipe(pipename);

    // wakeup the thread waiting for operations
    ::ReleaseSemaphore(wakeup(), 1, NULL);
  }
  ::ReleaseMutex(mutex());

  return (op != NULL) ? 0 : ATTACH_ERROR_RESOURCE;
}
Example No. 17
// Initialization - create the door, locks, and other initialization
int SolarisAttachListener::init() {
  if (create_door()) {
    return -1;
  }

  int status = os::Solaris::mutex_init(&_mutex);
  assert_status(status==0, status, "mutex_init");

  status = ::sema_init(&_wakeup, 0, NULL, NULL);
  assert_status(status==0, status, "sema_init");

  set_head(NULL);
  set_tail(NULL);

  return 0;
}
Example No. 18
// only for dev
int main(int argc, char *argv[])
{

    // call
    QStringList command;

    for(int i=1; i<argc; i++) {
        command.append(QString("%1").arg(argv[i]));
    }

//    command.append("function");
//    command.append("my");
//    command.append("arg_2");
//    command.append("arg_3");

    qDebug() << "Return: " << set_head(command);
}
Example No. 19
// Preallocate the maximum number of operations that can be enqueued.
int Win32AttachListener::init() {
  _mutex = (void*)::CreateMutex(NULL, FALSE, NULL);
  guarantee(_mutex != (HANDLE)NULL, "mutex creation failed");

  _enqueued_ops_semaphore = ::CreateSemaphore(NULL, 0, max_enqueued_operations, NULL);
  guarantee(_enqueued_ops_semaphore != (HANDLE)NULL, "semaphore creation failed");

  set_head(NULL);
  set_tail(NULL);
  set_available(NULL);

  for (int i=0; i<max_enqueued_operations; i++) {
    Win32AttachOperation* op = new Win32AttachOperation();
    op->set_next(available());
    set_available(op);
  }

  return 0;
}
Example No. 20
static int
internal_function
top_check(void)
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = GLRO(dl_pagesize);

  if (t == initial_top(&main_arena) ||
      (!chunk_is_mmapped(t) &&
       chunksize(t)>=MINSIZE &&
       prev_inuse(t) &&
       (!contiguous(&main_arena) ||
	(char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
    return 0;

  malloc_printerr (check_action, "malloc: top chunk is corrupt", t);

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE))
    {
      __set_errno (ENOMEM);
      return -1;
    }
  /* Call the `morecore' hook if necessary.  */
  void (*hook) (void) = force_reg (__after_morecore_hook);
  if (hook)
    (*hook) ();
  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}
Example No. 21
bool wp_write(char ch)
{
	int		i;
	bool	ok = true;	// initialized: the proportional branch writes no characters

	if ((ch == TAB) || (ch == ' '))
	{
		if (proportional)
		{
			if  (ch == ' ')
			{
				char_pos++;
				nr_spaces++;
			}
			else
			{
				char_pos += tab_size;
				nr_spaces += tab_size;
			}
		}
		else
		{
			if (ch == ' ')
				ok = write_char(' ');
			else
				for (i = 1; i <= tab_size; i++)
					ok = write_char(' ');
		}
	}
	else
	{
		if (nr_spaces > 1)
			ok = set_head();
		else if (nr_spaces == 1)
		{
			ok = write_char(' ');
			nr_spaces = 0;
		}
		ok = write_char(ch);
	}
	return ok;
}
Example No. 22
// preallocate the required number of operations
int Win32AttachListener::init() {
  _mutex = (void*)::CreateMutex(NULL, FALSE, NULL);
  guarantee(_mutex != (HANDLE)NULL, "mutex creation failed");

  _wakeup = ::CreateSemaphore(NULL, 0, 1, NULL);
  guarantee(_wakeup != (HANDLE)NULL, "semaphore creation failed");

  set_head(NULL);
  set_tail(NULL);

  // preallocate a few operations
  set_available(NULL);
  for (int i=0; i<preallocate_count; i++) {
    Win32AttachOperation* op = new Win32AttachOperation();
    op->set_next(available());
    set_available(op);
  }

  return 0;
}
Example No. 23
// Enqueue an operation
void SolarisAttachListener::enqueue(SolarisAttachOperation* op) {
  // lock list
  int res = os::Solaris::mutex_lock(mutex());
  assert(res == 0, "mutex_lock failed");

  // enqueue at tail
  op->set_next(NULL);
  if (head() == NULL) {
    set_head(op);
  } else {
    tail()->set_next(op);
  }
  set_tail(op);

  // wakeup the attach listener
  RESTARTABLE(::sema_post(wakeup()), res);
  assert(res == 0, "sema_post failed");

  // unlock
  os::Solaris::mutex_unlock(mutex());
}
Example No. 24
int add_at_last(linkedlist_t* list, void* data)
{
    node* n;
    n = node_new(data);

    // empty?
    if(NULL == get_head(list))
    {
        set_head(list, n);
        set_tail(list, n);
        list->size = 1;
        return list->size;
    }

    set_next(get_tail(list), n);
    set_prev(n, get_tail(list));
    set_tail(list, n);

    list->size++;
    return list->size;
}
Example No. 25
bool CMonster::ReplaceSoul(const MonsterInfo &info, bool boss)
{
	bBoss = boss;
	id = info.ID;
	name = info.name;
	set_head(info.Head);
	set_Lv(info.level);
	set_exp(info.exp);
	set_hp_m(info.hp);
	set_mp_m(info.mp);
	set_dc(info.DC1, info.DC2);
	set_mc(info.MC1, info.MC2);
	set_sc(0, 0);
	set_ac(info.AC, info.AC);
	set_mac(info.MAC, info.MAC);
	set_intervel(info.interval);

	// monster skill name pool (Chinese): pounce, charge, intimidate, flying strike, shadow strike,
	// poison, infrasonic wave, extreme speed, mighty force, trample
	QString str[10] = { QStringLiteral("扑击"), QStringLiteral("冲撞"), QStringLiteral("恐吓"), QStringLiteral("飞击"), QStringLiteral("影击"),
		QStringLiteral("毒术"), QStringLiteral("次声波"), QStringLiteral("极速"), QStringLiteral("巨力"), QStringLiteral("践踏") };
	skill.name = str[qrand() % 10];
	return true;
}
Example No. 26
void test_picking(){
	int n = 10000;
	set_t *set = new_set(0);
	int i;
	for (i=0; i < n; i++){
		set_put(set, i);
	}
	assert(set_size(set) == n);
	
	set_entry_t *p = set_head(set);
	for (i=0; i < n; i++){
		assert(p->key == i);
		p = p->next;
	}
	
	for (i=0; i < 3*n; i++){
		int v = set_get_random(set);
		assert(v < n);
	}
	
	delete_set(set);
}
Example No. 27
// dequeue the operation from the head of the operation list.
Win32AttachOperation* Win32AttachListener::dequeue() {
  for (;;) {
    DWORD res = ::WaitForSingleObject(wakeup(), INFINITE);
    guarantee(res == WAIT_OBJECT_0, "wait failed");

    res = ::WaitForSingleObject(mutex(), INFINITE);
    guarantee(res == WAIT_OBJECT_0, "wait failed");

    Win32AttachOperation* op = head();
    if (op != NULL) {
      set_head(op->next());
      if (head() == NULL) {     // list is empty
        set_tail(NULL);
      }
    }
    ::ReleaseMutex(mutex());

    if (op != NULL) {
      return op;
    }
  }
}
Example No. 28
// dequeue the operation from the head of the operation list.
Win32AttachOperation* Win32AttachListener::dequeue() {
  for (;;) {
    DWORD res = ::WaitForSingleObject(enqueued_ops_semaphore(), INFINITE);
    // returning from WaitForSingleObject will have decreased
    // the current count of the semaphore by 1.
    guarantee(res == WAIT_OBJECT_0, "wait failed");

    res = ::WaitForSingleObject(mutex(), INFINITE);
    guarantee(res == WAIT_OBJECT_0, "wait failed");

    Win32AttachOperation* op = head();
    if (op != NULL) {
      set_head(op->next());
      if (head() == NULL) {     // list is empty
        set_tail(NULL);
      }
    }
    ::ReleaseMutex(mutex());

    if (op != NULL) {
      return op;
    }
  }
}
Example No. 29
uint32_t buffer_write(char* buffer, const unsigned char* buf, uint32_t len,
                      int on_full) {
  uint32_t free;
  uint32_t bytes;
  uint32_t head;
  uint32_t size;
  uint32_t written;

  if (is_full(buffer)) return on_full;
  free = get_free_space(buffer);
  bytes = free > len ? len : free;
  head = get_head(buffer);
  size = get_size(buffer);
  written = 0;
  while (written < bytes) {
    // TODO(ricow): consider using memmove here instead;
    char* value_pointer = buffer + kDataIndex + head;
    *value_pointer = buf[written];
    head = (head + 1) % size;
    written++;
  }
  set_head(buffer, head);
  return written;
}
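A sketch of the memmove-based variant the TODO above hints at: the wrap-around write becomes at most two contiguous block copies instead of a byte-at-a-time loop. It assumes the same helpers as the example (is_full, get_free_space, get_head, get_size, set_head, kDataIndex) and is illustrative, not the library's actual code.

#include <stdint.h>
#include <string.h>   /* memmove */

uint32_t buffer_write_blockwise(char* buffer, const unsigned char* buf,
                                uint32_t len, int on_full) {
  if (is_full(buffer)) return on_full;

  uint32_t free_space = get_free_space(buffer);
  uint32_t bytes = free_space > len ? len : free_space;
  uint32_t head = get_head(buffer);
  uint32_t size = get_size(buffer);

  /* First copy: from the head up to the end of the storage area. */
  uint32_t first = bytes < size - head ? bytes : size - head;
  memmove(buffer + kDataIndex + head, buf, first);

  /* Second copy: whatever wrapped around to the start of the storage area. */
  memmove(buffer + kDataIndex, buf + first, bytes - first);

  set_head(buffer, (head + bytes) % size);
  return bytes;
}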
Example No. 30
//static void 
void
pos_int_free(char *name, mstate av, mchunkptr p, int flag)
{
	INTERNAL_SIZE_T size;
	mfastbinptr* fb;
	mchunkptr prevchunk;
	INTERNAL_SIZE_T prevsize;
	mchunkptr nextchunk;
	INTERNAL_SIZE_T nextsize;
	int nextinuse;
	mchunkptr bck;
	mchunkptr fwd;

	//const char *errstr = NULL;


	size = chunksize(p);
	/*if ((uintptr_t) p > (uintptr_t) -size || misaligned_chunk (p)) {
		errstr = "free(): invalid pointer";
errout:
		//malloc_printerr (check_action, errstr, chunk2mem(p));
		return;
	}*/
	/*if (size < MINSIZE) {
		errstr = "free(): invalid size";
		goto errout;
	}*/

	//check_inuse_chunk(av, p);


	// fastbin
	if (flag==1 && (unsigned long)(size) <= (unsigned long)(get_max_fast ())) {
		/*if (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
		   || chunksize (chunk_at_offset (p, size)) >= av->system_mem) {
			errstr = "free(): invalid next size (fast)";
			goto errout;
		}*/

#if CONSISTENCY == 1
		set_fastchunks_log(name, av);
#else
		set_fastchunks(av);
#endif
		fb = &fastbin(av, fastbin_index(size));

		if (*fb == p) {
			//errstr = "double free or corruption (fasttop)";
			//goto errout;
			return ;
		}

#if CONSISTENCY == 1
		POS_WRITE_VAUE(name, (unsigned long *)&p->fd, (unsigned long)*fb);
		POS_WRITE_VAUE(name, (unsigned long *)fb, (unsigned long)p);
#else
		p->fd = *fb;
		*fb = p;
#endif

		return ;
	}

	// 1. First chunk
	if (chunk_is_first(p)) {

		nextchunk = next_chunk(p);
		nextsize = chunksize(nextchunk);

		// 1-1. (free F), free L
		if (chunk_is_last(nextchunk) && !inuse(nextchunk)) {

			//if (av < p && p < (char *)(av+PAGESIZE)){
			if ((char*)av+sizeof(struct malloc_state) == (char*)p) {
#if CONSISTENCY == 1
				insert_to_unsorted_log(name, av, p, bck, fwd, size);

				set_foot_log(name, p, size);
				clear_inuse_bit_at_offset_log(name, p, size);
#else
				insert_to_unsorted(av, p, bck, fwd, size);

				set_foot(p, size);
				clear_inuse_bit_at_offset(p, size);
#endif

				goto out;
			}
			else {
#if CONSISTENCY == 1
				unlink_log(name, nextchunk, bck, fwd);
				size = size + nextsize + 2*SIZE_SZ;

				pos_log_insert_malloc_free(name, (unsigned long)p, size);
				//pos_seg_free(name, (void *)p, size); // Delayed pos_seg_free
				POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
				unlink(nextchunk, bck, fwd);
				size = size + nextsize + 2*SIZE_SZ;
				/*if (size%PAGESIZE != 0) {
					errstr = "free(): unmmap size is not page size";
					goto errout;
				}*/
				//FREE((char*)p, size);
				pos_seg_free(name, (void *)p, size);
				av->system_mem -= size;
#endif

				goto out;
			}

		}

		// 1-3. (free F), free M
		else if (!inuse(nextchunk)) {
#if CONSISTENCY == 1
			unlink_log(name, nextchunk, bck, fwd);
			size += nextsize;

			insert_to_unsorted_log(name, av, p, bck, fwd, size);

			set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
			set_foot_log(name, p, size);
#else
			unlink(nextchunk, bck, fwd);
			size += nextsize;

			insert_to_unsorted(av, p, bck, fwd, size);

			set_head(p, size | FIRST_CHUNK | PREV_INUSE);
			set_foot(p, size);
#endif

			goto out;
		}

		// 1-2. (free F), inuse L & 1-4. (free F), inuse M
		else {
#if CONSISTENCY == 1
			insert_to_unsorted_log(name, av, p, bck, fwd, size);

			set_foot_log(name, p, size);
			clear_inuse_bit_at_offset_log(name, p, size);
#else
			insert_to_unsorted(av, p, bck, fwd, size);

			set_foot(p, size);
			clear_inuse_bit_at_offset(p, size);
#endif

			goto out;
		}

	}

	// 2. Last chunk
	else if (chunk_is_last(p)) {

		if (!prev_inuse(p)) {
			prevchunk = prev_chunk(p);
			prevsize = chunksize(prevchunk);

			// 2-1. free F, (free L)
			if (chunk_is_first(prevchunk)) {

				//if (av < prevchunk && prevchunk < av+PAGESIZE){
				if((char*)av+sizeof(struct malloc_state) == (char*)prevchunk) {
#if CONSISTENCY == 1
					insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
					set_foot_log(name, p, size);
					clear_inuse_bit_at_offset_log(name, p, size);
#else
					insert_to_unsorted(av, p, bck, fwd, size);
  
					set_foot(p, size);
					clear_inuse_bit_at_offset(p, size);
#endif

					goto out;
				}
				else {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					size = prevsize+size+2*SIZE_SZ;
					//pos_seg_free(name, (void *)p, size);
					pos_log_insert_malloc_free(name, (unsigned long)p, size);
					POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
					unlink(prevchunk, bck, fwd);
					size = prevsize+size+2*SIZE_SZ;
					/*if (size%PAGESIZE != 0) {
						errstr = "free(): unmmap size is not page size";
						goto errout;
					}*/
					//FREE((char*)p, size);
					pos_seg_free(name, (void *)p, size);
					av->system_mem -= size;
#endif

					goto out;
				}

			}

			// 2-3. free M, (free L)
			else {
#if CONSISTENCY == 1
				unlink_log(name, prevchunk, bck, fwd);
				size += prevsize;
				p = chunk_at_offset(p, -((long) prevsize));
  
				insert_to_unsorted_log(name, av, p, bck, fwd, size);

				set_head_log(name, p, size | LAST_CHUNK | PREV_INUSE);
				set_foot_log(name, p, size);
				clear_inuse_bit_at_offset_log(name, p, size);
#else
				unlink(prevchunk, bck, fwd);
				size += prevsize;
				p = chunk_at_offset(p, -((long) prevsize));
  
				insert_to_unsorted(av, p, bck, fwd, size);

				set_head(p, size | LAST_CHUNK | PREV_INUSE);
				set_foot(p, size);
				clear_inuse_bit_at_offset(p, size);
#endif
				goto out;
			}

		}

		// 2-2. inuse F, (free L) & 2-4. inuse M, (free L)
		else {
#if CONSISTENCY == 1
			insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
			set_foot_log(name, p, size);
			clear_inuse_bit_at_offset_log(name, p, size);
#else
			insert_to_unsorted(av, p, bck, fwd, size);
  
			set_foot(p, size);
			clear_inuse_bit_at_offset(p, size);
#endif

			goto out;
		}

	}

	// 3. Middle chunk
	else {

		nextchunk = next_chunk(p);
		nextsize = chunksize(nextchunk);

		if (!prev_inuse(p)) {
			prevchunk = prev_chunk(p);
			prevsize = chunksize(prevchunk);

			// 3-1. free F, (free M), free L
			if (chunk_is_first(prevchunk) && chunk_is_last(nextchunk) && !inuse(nextchunk) ) {
	
				//if (av < prevchunk && prevchunk < av+PAGESIZE){
				if((char*)av+sizeof(struct malloc_state) == (char*)prevchunk) {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					size += prevsize;
					p = chunk_at_offset(p, -((long) prevsize));

					insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
					set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
					set_foot_log(name, p, size);
					clear_inuse_bit_at_offset_log(name, p, size);
#else
					unlink(prevchunk, bck, fwd);
					size += prevsize;
					p = chunk_at_offset(p, -((long) prevsize));

					insert_to_unsorted(av, p, bck, fwd, size);
  
					set_head(p, size | FIRST_CHUNK | PREV_INUSE);
					set_foot(p, size);
					clear_inuse_bit_at_offset(p, size);
#endif
  
					goto out;
				}

				else {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					unlink_log(name, nextchunk, bck, fwd);
					p = chunk_at_offset(p, -((long) prevsize));
					size = prevsize+size+nextsize+2*SIZE_SZ;

					pos_log_insert_malloc_free(name, (unsigned long)p, size);
					//pos_seg_free(name, (void *)p, size);
					POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
					unlink(prevchunk, bck, fwd);
					unlink(nextchunk, bck, fwd);
					p = chunk_at_offset(p, -((long) prevsize));
					size = prevsize+size+nextsize+2*SIZE_SZ;
					/*if (size%PAGESIZE != 0) {
						errstr = "free(): unmmap size is not page size";
						goto errout;
					}*/
					//FREE((char*)p, size);
					pos_seg_free(name, (void *)p, size);
					av->system_mem -= size;
#endif
  
					goto out;
				}
			}

#if CONSISTENCY == 1
			unlink_log(name, prevchunk, bck, fwd);
#else
			unlink(prevchunk, bck, fwd);
#endif
			size += prevsize;
			p = chunk_at_offset(p, -((long) prevsize));

			if (chunk_is_first(prevchunk)) {
#if CONSISTENCY == 1
				set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
#else
				set_head(p, size | FIRST_CHUNK | PREV_INUSE);
				//set_foot(p, size);
				//clear_inuse_bit_at_offset(p, size);
#endif
			}

		}

		nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

		if (!nextinuse) {
#if CONSISTENCY == 1
			unlink_log(name, nextchunk, bck, fwd);
#else
			unlink(nextchunk, bck, fwd);
#endif
			size += nextsize;
		}

#if CONSISTENCY == 1
		insert_to_unsorted_log(name, av, p, bck, fwd, size);

		if (chunk_is_first(p)) {
			set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
		} else if (chunk_is_last(nextchunk)&&!nextinuse) {
			set_head_log(name, p, size | LAST_CHUNK | PREV_INUSE);
		} else {
			set_head_log(name, p, size | PREV_INUSE);
		}
		set_foot_log(name, p, size);
		clear_inuse_bit_at_offset_log(name, p, size);
#else
		//else
		//clear_inuse_bit_at_offset(nextchunk, 0);

		insert_to_unsorted(av, p, bck, fwd, size);

		if (chunk_is_first(p)) {
			set_head(p, size | FIRST_CHUNK | PREV_INUSE);
		} else if (chunk_is_last(nextchunk)&&!nextinuse) {
			set_head(p, size | LAST_CHUNK | PREV_INUSE);
		} else {
			set_head(p, size | PREV_INUSE);
		}
		set_foot(p, size);
		clear_inuse_bit_at_offset(p, size);

		//check_free_chunk(av, p);
#endif
	}

out: 
	if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD && have_fastchunks(av)) {
		pos_malloc_consolidate(name, av);
	}
}