static inline void add(hdr_t* hdr, size_t size) {
    ScopedPthreadMutexLocker locker(&lock);
    hdr->tag = ALLOCATION_TAG;
    hdr->size = size;
    init_front_guard(hdr);
    init_rear_guard(hdr);
    ++gAllocatedBlockCount;
    add_locked(hdr, &tail, &head);
}
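None of the examples on this page include add_locked itself. For the malloc-debug wrappers it is a plain head-insert into a doubly-linked block list. The sketch below is an illustrative reconstruction, assuming hdr_t carries prev/next links; the real header also holds the tag, size, and guard fields set above.

// Illustrative sketch only: hdr_t reduced to its list links.
struct hdr_t {
    hdr_t* prev;
    hdr_t* next;
};

// Push hdr onto the front of the list bounded by *head and *tail.
// The caller must already hold the list mutex, hence the _locked
// suffix used throughout these examples.
static inline void add_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) {
    hdr->prev = nullptr;
    hdr->next = *head;
    if (*head != nullptr) {
        (*head)->prev = hdr;
    } else {
        *tail = hdr;  // list was empty: the new node is also the tail
    }
    *head = hdr;
}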
Example #2
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
                    grpc_event_finish_func on_finish, void *user_data,
                    grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call, on_finish, user_data);
  ev->base.data.write_accepted = error;
  end_op_locked(cc, GRPC_OP_COMPLETE);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example #3
void grpc_cq_end_read(grpc_completion_queue *cc, void *tag, grpc_call *call,
                      grpc_event_finish_func on_finish, void *user_data,
                      grpc_byte_buffer *read) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_READ, tag, call, on_finish, user_data);
  ev->base.data.read = read;
  end_op_locked(cc, GRPC_READ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example #4
static inline void add(struct hdr *hdr, size_t size)
{
    pthread_mutex_lock(&lock);
    hdr->tag = ALLOCATION_TAG;
    hdr->size = size;
    init_front_guard(hdr);
    init_rear_guard(hdr);
    num++;
    add_locked(hdr, &tail, &head);
    pthread_mutex_unlock(&lock);
}
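The C variant above pairs the lock and unlock calls explicitly, while the C++ variants on this page rely on ScopedPthreadMutexLocker to release the mutex when the scope exits. A minimal RAII guard in that style (an illustrative sketch, not bionic's exact header):

#include <pthread.h>

// Locks on construction, unlocks on scope exit, so every return path
// releases the mutex.
class ScopedPthreadMutexLocker {
public:
    explicit ScopedPthreadMutexLocker(pthread_mutex_t* mu) : mu_(mu) {
        pthread_mutex_lock(mu_);
    }
    ~ScopedPthreadMutexLocker() {
        pthread_mutex_unlock(mu_);
    }

private:
    pthread_mutex_t* mu_;

    // Non-copyable: a copied guard would unlock twice.
    ScopedPthreadMutexLocker(const ScopedPthreadMutexLocker&) = delete;
    ScopedPthreadMutexLocker& operator=(const ScopedPthreadMutexLocker&) = delete;
};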
Example #5
static inline void add_to_backlog(hdr_t* hdr) {
    ScopedPthreadMutexLocker locker(&backlog_lock);
    hdr->tag = BACKLOG_TAG;
    backlog_num++;
    add_locked(hdr, &backlog_tail, &backlog_head);
    poison(hdr);
    /* If we've exceeded the maximum backlog, clear it up */
    while (backlog_num > gMallocDebugBacklog) {
        hdr_t* gone = backlog_tail;
        del_from_backlog_locked(gone);
        dlfree(gone->base);
    }
}
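The eviction loop above also depends on del_from_backlog_locked, which this page does not show. Assuming the same doubly-linked layout as the earlier add_locked sketch, a matching unlink helper could look like the following; the globals are stand-ins for the ones referenced above.

#include <cstddef>

struct hdr_t {
    hdr_t* prev;
    hdr_t* next;
};

// Stand-ins for the backlog globals used by add_to_backlog above.
static hdr_t* backlog_head = nullptr;
static hdr_t* backlog_tail = nullptr;
static size_t backlog_num = 0;

// Unlink hdr from the backlog list; backlog_lock must already be held.
static inline void del_from_backlog_locked(hdr_t* hdr) {
    if (hdr->prev != nullptr) {
        hdr->prev->next = hdr->next;
    } else {
        backlog_head = hdr->next;  // hdr was the head
    }
    if (hdr->next != nullptr) {
        hdr->next->prev = hdr->prev;
    } else {
        backlog_tail = hdr->prev;  // hdr was the tail
    }
    --backlog_num;
}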
Example #6
void grpc_cq_end_client_metadata_read(grpc_completion_queue *cc, void *tag,
                                      grpc_call *call,
                                      grpc_event_finish_func on_finish,
                                      void *user_data, size_t count,
                                      grpc_metadata *elements) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_CLIENT_METADATA_READ, tag, call, on_finish,
                  user_data);
  ev->base.data.client_metadata_read.count = count;
  ev->base.data.client_metadata_read.elements = elements;
  end_op_locked(cc, GRPC_CLIENT_METADATA_READ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example #7
static inline void add_to_backlog(struct hdr *hdr)
{
    pthread_mutex_lock(&backlog_lock);
    hdr->tag = BACKLOG_TAG;
    backlog_num++;
    add_locked(hdr, &backlog_tail, &backlog_head);
    poison(hdr);
    /* If we've exceeded the maximum backlog, clear it up */
    while (backlog_num > malloc_double_free_backlog) {
        struct hdr *gone = backlog_tail;
        del_from_backlog_locked(gone);
        dlfree(gone);
    }
    pthread_mutex_unlock(&backlog_lock);
}
Example #8
void grpc_cq_end_finished(grpc_completion_queue *cc, void *tag, grpc_call *call,
                          grpc_event_finish_func on_finish, void *user_data,
                          grpc_status_code status, const char *details,
                          grpc_metadata *metadata_elements,
                          size_t metadata_count) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_FINISHED, tag, call, on_finish, user_data);
  ev->base.data.finished.status = status;
  ev->base.data.finished.details = details;
  ev->base.data.finished.metadata_count = metadata_count;
  ev->base.data.finished.metadata_elements = metadata_elements;
  end_op_locked(cc, GRPC_FINISHED);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example #9
void grpc_cq_end_new_rpc(grpc_completion_queue *cc, void *tag, grpc_call *call,
                         grpc_event_finish_func on_finish, void *user_data,
                         const char *method, const char *host,
                         gpr_timespec deadline, size_t metadata_count,
                         grpc_metadata *metadata_elements) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_SERVER_RPC_NEW, tag, call, on_finish, user_data);
  ev->base.data.server_rpc_new.method = method;
  ev->base.data.server_rpc_new.host = host;
  ev->base.data.server_rpc_new.deadline = deadline;
  ev->base.data.server_rpc_new.metadata_count = metadata_count;
  ev->base.data.server_rpc_new.metadata_elements = metadata_elements;
  end_op_locked(cc, GRPC_SERVER_RPC_NEW);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
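All of the grpc_cq_end_* wrappers above share one shape: take the pollset mutex, link a new event with add_locked, fill in the payload fields for that event type, and let end_op_locked signal waiters before unlocking. The sketch below shows that shape on a deliberately simplified queue; it is not gRPC's internal implementation, and every name in it is illustrative.

#include <pthread.h>
#include <cstdlib>

// Toy event queue guarded by one mutex; the condvar stands in for the
// pollset wakeup that end_op_locked performs in gRPC.
struct event_node {
    int type;
    void* tag;
    int payload;        // stands in for the per-type union fields
    event_node* next;
};

struct event_queue {
    pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    event_node* head = nullptr;
};

// Allocate and link an event; q->mu must be held by the caller.
// (Allocation error handling elided for brevity.)
static event_node* add_locked(event_queue* q, int type, void* tag) {
    event_node* ev = static_cast<event_node*>(std::calloc(1, sizeof(event_node)));
    ev->type = type;
    ev->tag = tag;
    ev->next = q->head;
    q->head = ev;
    return ev;
}

// The wrapper pattern: lock, add, fill payload, signal, unlock.
static void end_op(event_queue* q, int type, void* tag, int payload) {
    pthread_mutex_lock(&q->mu);
    event_node* ev = add_locked(q, type, tag);
    ev->payload = payload;          // like ev->base.data.<field> = ...
    pthread_cond_signal(&q->cv);    // like end_op_locked waking pollers
    pthread_mutex_unlock(&q->mu);
}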
Example #10
int TESTremove(int tableType, int numPkt, int numWorkers) 
{
  int i, rc;  
  volatile HashPacket_t *pkt;
  if (numPkt % numWorkers) {
    fprintf(stderr,"ERROR: pkts not divisible by workers\n");
    exit(-1);
  }
  
  genSource = createHashPacketGenerator(.25, .25, 0.5, 1000);

  // allocate and initialize queues + fingerprints

  HashList_t *queues[numWorkers];
  long fingerprints[numWorkers];
  for (i = 0; i < numWorkers; i++) {
    queues[i] = createHashList();
    fingerprints[i] = 0;
  }

  // Initialize Table + worker data
  pthread_t worker[numWorkers];
  ParallelPacketWorker_t data[numWorkers];
  hashtable_t *htable = initTable(4, 0, data, genSource, numWorkers, NULL, queues, fingerprints, tableType);

  switch(tableType) {
  case(LOCKED):
    for (i=0; i < (numPkt*2); i++) {
      pkt = getRandomPacket(genSource);
      pkt->key = i;
      add_locked(htable, pkt->key, pkt->body);
      enqueue(queues[i % numWorkers], numPkt+1, pkt, i);
    }
    break;
  case(LOCKFREEC):
    for (i=0; i < (numPkt*2); i++) {
      pkt = getRandomPacket(genSource);
      pkt->key = i;
      add_lockFreeC(htable, pkt->key, pkt->body);
      enqueue(queues[i % numWorkers], numPkt+1, pkt, i);
    }
    break; 
  case(LINEARPROBED):
    for (i=0; i < (numPkt*2); i++) {
      pkt = getRandomPacket(genSource);
      pkt->key = i;
      add_linearProbe(htable, pkt->key, pkt->body);
      enqueue(queues[i % numWorkers], numPkt+1, pkt, i);
    }
    break; 
  case(AWESOME):
    for (i=0; i < (numPkt*2); i++) {
      pkt = getRandomPacket(genSource);
      pkt->key = i;
      add_awesome(htable, pkt->key, pkt->body);
      enqueue(queues[i % numWorkers], numPkt+1, pkt, i);
    }
    break;
  }
  
  for (i = 0 ; i < numWorkers; i++) {
    data[i].myCount = numPkt/numWorkers;
  }
  
  // Spawn Workers
  for (i = 0; i < numWorkers; i++) {
    if ((rc = pthread_create(worker+i, NULL, (void *) &removeWorker, (void *) (data+i)))) {
      fprintf(stderr,"ERROR: return code from pthread_create() for thread is %d\n", rc);
      exit(-1);
    }
  }
  
  // call join for each Worker
  for (i = 0; i < numWorkers; i++) {
    pthread_join(worker[i], NULL);
  }

  int res = 0;
  for (i = 0; i < numWorkers; i++) {
    res += (data[i].myCount == -1);
    //printf("COUNT = %li\n", data[i].myCount);
  }

  //print_locked(htable->locked);
  //printf("size = %li\n",  countPkt(htable, tableType));
  int size = (countPkt(htable, tableType) == numPkt); 
  free_htable(htable, tableType);
  return (!res) && size;
}
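A hypothetical driver for the test above (not part of the original file; the header name and the packet/worker counts are invented, but numPkt must stay divisible by numWorkers or TESTremove exits early):

#include <cstdio>
#include "hashtest.h"  // hypothetical header providing LOCKED, LOCKFREEC,
                       // LINEARPROBED, AWESOME and TESTremove()

int main() {
    const int types[] = { LOCKED, LOCKFREEC, LINEARPROBED, AWESOME };
    for (int type : types) {
        // 64 packets over 4 workers keeps numPkt % numWorkers == 0.
        std::printf("TESTremove(type=%d): %s\n", type,
                    TESTremove(type, 64, 4) ? "PASS" : "FAIL");
    }
    return 0;
}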
Example #11
int TESTcontains(int tableType, int numPkt, int numWorkers)
{
  int i, rc;  
  // allocate and initialize queues + fingerprints
  HashList_t *queues[numWorkers];
  long fingerprints[numWorkers];
  volatile HashPacket_t *pkt;

  // create the packet generator before drawing packets from it
  genSource = createHashPacketGenerator(.25, .25, 0.5, 1000);

  for (i = 0; i < numWorkers; i++) {
    queues[i] = createHashList();
    pkt = getRandomPacket(genSource);
    HashItem_t *newItem = (HashItem_t *)malloc(sizeof(HashItem_t));
    newItem->key = numPkt;
    newItem->value = pkt;
    queues[i]->head = newItem;
    queues[i]->tail = newItem;
    queues[i]->size++;

    fingerprints[i] = 0;
  }

  // Initialize Table + worker data
  pthread_t worker[numWorkers];
  ParallelPacketWorker_t data[numWorkers];
  hashtable_t *htable = initTable(12, 0, data, genSource, numWorkers, NULL, queues, fingerprints, tableType);

  for (i = 0 ; i < numWorkers; i++) {
    data[i].myCount = numPkt;
  }

  i = 0;

  switch(tableType) {
  case(LOCKED):
    for (i=0; i < numPkt; i++) {
      pkt = getRandomPacket(genSource);
      add_locked(htable, i, pkt->body);
      enqueue(queues[0], numPkt+1, pkt, i);
    }
    break;
  case(LOCKFREEC):
    for (i=0; i < numPkt; i++) {
      pkt = getRandomPacket(genSource);
      add_lockFreeC(htable, i, pkt->body);
      enqueue(queues[0], numPkt+1, pkt, i);
    }
    break;
  case(LINEARPROBED):
    for (i=0; i < numPkt; i++) {
      pkt = getRandomPacket(genSource);
      add_linearProbe(htable, i, pkt->body);
      enqueue(queues[0], numPkt+1, pkt, i);
    }
    break;
  case(AWESOME):
    for (i=0; i < numPkt; i++) {
      pkt = getRandomPacket(genSource);
      add_awesome(htable, i, pkt->body);
      enqueue(queues[0], numPkt+1, pkt, i);
    }
    break;
  }
  
  // Spawn Workers
  for (i = 0; i < numWorkers; i++) {
    if ((rc = pthread_create(worker+i, NULL, (void *) &containsWorker, (void *) (data+i)))) {
      fprintf(stderr,"ERROR: return code from pthread_create() for thread is %d\n", rc);
      exit(-1);
    }
  }
  
  // call join for each Worker
  for (i = 0; i < numWorkers; i++) {
    pthread_join(worker[i], NULL);
  }   
  
  int res = 0;
  // -1 indicates failure. Check all threads
  for (i = 0; i < numWorkers; i++) {
    res += (data[i].myCount == -1);
    //printf("COUNT = %i\n", data[i].myCount);
  }
  //print_table(htable, tableType);
  free_htable(htable, tableType);
  return !res;
}
Example #12
int TESTcreatetable(int tableType, int numPkt, int numWorkers) {
  hashtable_t *t = (hashtable_t *)malloc(sizeof(hashtable_t));

  HashPacketGenerator_t * source = createHashPacketGenerator(.25, .25, 0.5, 1000);
  int size = 0, i = 0;
  int sum = 0;
  volatile HashPacket_t *pkt;

  switch(tableType) {
  case(LOCKED):
    t->locked = createLockedTable(numPkt, 12, numWorkers);
    for (i=0; i < numPkt; i++) {
      pkt = getAddPacket(source);
      add_locked(t, mangleKey((HashPacket_t *)pkt), pkt->body);
    }
    size = t->locked->size;
    for (i = 0; i < size; i++) {
      if ((t->locked->table)[i] != NULL) {
        sum += (t->locked->table)[i]->size;
      }
    }
    break;
  case(LOCKFREEC):
    t->lockFreeC = createLockFreeCTable(numPkt, 12, numWorkers);
    for (i=0; i < numPkt; i++) {
      pkt = getAddPacket(source);
      add_lockFreeC(t, mangleKey((HashPacket_t *)pkt), pkt->body);
    }
    size = t->lockFreeC->size;
    for (i = 0; i < size; i++) {
      if ((t->lockFreeC->table)[i] != NULL) {
        sum += (t->lockFreeC->table)[i]->size;
      }
    }
    break; 
  case(LINEARPROBED):
    t->linearProbe = createLinearProbeTable(numPkt, 12, numWorkers);
    for (i=0; i < numPkt; i++) {
      pkt = getAddPacket(source);
      add_linearProbe(t, mangleKey((HashPacket_t *)pkt), pkt->body);
    }
    size = t->linearProbe->size;
    for (i = 0; i < size; i++) {
      sum += ((t->linearProbe->table)[i].value != NULL);
    }
    break;
  case(AWESOME):
    t->awesome = createAwesomeTable(30);
    for (i=0; i < numPkt; i++) {
      pkt = getAddPacket(source);
      add_awesome(t, i, pkt->body);
    }
    size = (1 << numPkt);
    //size = t->awesome->bucketSize;
    sum = (int)countPkt(t, tableType);
    break; 
  }
  if (size != (1 << numPkt)) {
    fprintf(stderr,"ERROR: Tree size incorrect. Got %i. Want %i.\n", size, (1 << numPkt));
    return 0;
  }
  if (sum != numPkt) {
    fprintf(stderr,"ERROR: Number of entries incorrect. Got %i. Want %i.\n", sum, numPkt);
    return 0;
  }
  free_htable(t, tableType);
  return i;
}
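As a concrete check of the arithmetic above: with numPkt = 12, the test expects the created table to report 1 << 12 = 4096 buckets, and the per-bucket sizes (or occupied probe slots) to sum to exactly 12 inserted packets.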
Example #13
void grpc_cq_end_server_shutdown(grpc_completion_queue *cc, void *tag) {
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  add_locked(cc, GRPC_SERVER_SHUTDOWN, tag, NULL, NULL, NULL);
  end_op_locked(cc, GRPC_SERVER_SHUTDOWN);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example #14
void AntiSem::add_int(ulen dcount)
 {
  add_locked(InterruptContext,dcount);
 }
Example #15
void AntiSem::add(ulen dcount)
 {
  Dev::IntLock lock;

  add_locked(CurTaskContext,dcount);
 }
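Dev::IntLock in CCore plays the same role for interrupt context that the pthread mutexes do in the earlier examples: a scoped guard, here masking interrupts around the add_locked call. A generic sketch of the idiom (the platform hooks below are placeholders, not CCore's API):

// Hypothetical platform primitives; a real port would map these to
// the CPU's interrupt mask/restore instructions.
unsigned DisableInterrupts();
void RestoreInterrupts(unsigned saved);

// Scoped interrupt lock in the style of Dev::IntLock: interrupts are
// masked in the constructor and the prior state is restored in the
// destructor, so the guarded region cannot be preempted by an ISR.
class ScopedIntLock {
    unsigned saved_;
public:
    ScopedIntLock() : saved_(DisableInterrupts()) {}
    ~ScopedIntLock() { RestoreInterrupts(saved_); }

    ScopedIntLock(const ScopedIntLock&) = delete;
    ScopedIntLock& operator=(const ScopedIntLock&) = delete;
};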