Example no. 1
void * worker_loop(void * arg_void) {

  WorkerLoopArgs * arg = arg_void;
  
  StateMachine * stateMachine;
  DfaSet * dfaSet = arg->dfaSet;
  state_machine_id machine_id = arg->machine_id;
  readlock(dfaSet->lock, machine_id);

  DEBUG("Worker loop: %ld\n", machine_id);
  
  stateMachine = getSmash(dfaSet->smash, machine_id);

  assert(stateMachine->machine_id == machine_id);

  if(pthread_detach(stateMachine->worker_thread) != 0) {
    perror("pthread_detach");
  }

  readunlock(dfaSet->lock);
  inner_worker_loop(arg_void);

  writelock(dfaSet->lock, machine_id); 
  DEBUG("Freeing machine %ld\n", machine_id);

  freeSmash(dfaSet->smash, machine_id);
  writeunlock(dfaSet->lock);

  return 0;
}
Example no. 2
static void stasis_alloc_register_old_regions(stasis_alloc_t* alloc) {
  pageid_t boundary = REGION_FIRST_TAG;
  boundary_tag t;
  DEBUG("registering old regions\n");
  int succ = TregionReadBoundaryTag(-1, boundary, &t);
  if(succ) {
    do {
      DEBUG("boundary tag %lld type %d\n", boundary, t.allocation_manager);
      if(t.allocation_manager == STORAGE_MANAGER_TALLOC) {
        for(pageid_t i = 0; i < t.size; i++) {
          Page * p = loadPage(-1, boundary + i);
          readlock(p->rwlatch,0);
          if(p->pageType == SLOTTED_PAGE) {
            stasis_allocation_policy_register_new_page(alloc->allocPolicy, p->id, stasis_record_freespace(-1, p));
            DEBUG("registered page %lld\n", boundary+i);
          } else {
            abort();
          }
          unlock(p->rwlatch);
          releasePage(p);
        }
      }
    } while(TregionNextBoundaryTag(-1, &boundary, &t, 0));
  }
}
Example no. 3
int TrecordType(int xid, recordid rid) {
  Page * p;
  p = loadPage(xid, rid.page);
  readlock(p->rwlatch,0);
  int ret;
  ret = stasis_record_type_read(xid, p, rid);
  unlock(p->rwlatch);
  releasePage(p);
  return ret;
}
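
Examples 3, 5, 9, and 11 share one latch discipline: pin the page with loadPage(), take the shared latch on p->rwlatch, do the read, drop the latch, then unpin with releasePage(). A minimal sketch of that pattern as a reusable helper, assuming only the Stasis calls shown in these examples; the helper name and the callback type are hypothetical:

/* Hypothetical helper; only loadPage/readlock/unlock/releasePage are
   taken from the examples above. */
typedef int (*page_read_fn)(int xid, Page * p, recordid rid);

static int with_page_read_latch(int xid, recordid rid, page_read_fn fn) {
  Page * p = loadPage(xid, rid.page);  /* pin the page */
  readlock(p->rwlatch, 0);             /* shared latch: readers may overlap */
  int ret = fn(xid, p, rid);           /* caller's read-only work */
  unlock(p->rwlatch);                  /* drop the latch before unpinning */
  releasePage(p);
  return ret;
}

If stasis_record_type_read() has the signature used in Example 3, TrecordType() then reduces to a single call: with_page_read_latch(xid, rid, stasis_record_type_read).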
Example no. 4
void *reader (void *args)
{
    rwargs *a = static_cast<rwargs *>(args);
    int d;

    do
    {
        readlock (a->lock, a->id);
        d = data;
        usleep (a->delay);
        readunlock (a->lock);
        RPMS_DEBUG( LOGNAME, "Reader" + rpms::convert<std::string>(a->id) + ": data = " + rpms::convert<std::string>(d) );
        usleep (a->delay);
    } while (d != 0);
    RPMS_DEBUG( LOGNAME, "Reader" + rpms::convert<std::string>(a->id) + ": finished" );
    return 0;
}
Example no. 5
int TrecordSize(int xid, recordid rid) {
  int ret;
  Page * p;
  p = loadPage(xid, rid.page);
  readlock(p->rwlatch,0);
  rid.size = stasis_record_length_read(xid, p, rid);
  if(stasis_record_type_read(xid,p,rid) == BLOB_SLOT) {
    blob_record_t r;
    stasis_record_read(xid,p,rid,(byte*)&r);
    ret = r.size;
  } else {
    ret = rid.size;
  }
  unlock(p->rwlatch);
  releasePage(p);
  return ret;
}
Example no. 6
void * run_request(DfaSet * dfaSet, state_machine_id machine_id) {
  void * ret;
  WorkerLoopArgs * worker_loop_args = malloc(sizeof(WorkerLoopArgs));
  StateMachine * machine;
  
  readlock(dfaSet->lock, 600);
  
  machine = getSmash(dfaSet->smash, machine_id);
 
  worker_loop_args->dfaSet = dfaSet;
  worker_loop_args->machine_id = machine_id;
  
  machine->worker_thread = pthread_self();
  readunlock(dfaSet->lock);

  ret = inner_worker_loop(worker_loop_args);

  return ret;
}
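
Note the ownership handoff here: run_request() heap-allocates the WorkerLoopArgs, and inner_worker_loop() (Example 10) frees them. The malloc() above is also unchecked; a checked version of the setup might look like this sketch (the helper name is an assumption):

static WorkerLoopArgs * make_worker_args(DfaSet * dfaSet,
                                         state_machine_id machine_id) {
  WorkerLoopArgs * a = (WorkerLoopArgs *)malloc(sizeof(*a));
  if(a == NULL) { return NULL; }  /* caller must handle allocation failure */
  a->dfaSet = dfaSet;
  a->machine_id = machine_id;
  return a;  /* ownership passes to inner_worker_loop(), which frees it */
}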
Example no. 7
static void stasis_alloc_reserve_new_region(stasis_alloc_t* alloc, int xid) {
     void* nta = TbeginNestedTopAction(xid, OPERATION_NOOP, 0,0);

     pageid_t firstPage = TregionAlloc(xid, TALLOC_REGION_SIZE, STORAGE_MANAGER_TALLOC);
     int initialFreespace = -1;

     for(pageid_t i = 0; i < TALLOC_REGION_SIZE; i++) {
       TinitializeSlottedPage(xid, firstPage + i);
       if(initialFreespace == -1) {
         Page * p = loadPage(xid, firstPage);
         readlock(p->rwlatch,0);
         initialFreespace = stasis_record_freespace(xid, p);
         unlock(p->rwlatch);
         releasePage(p);
       }
       stasis_allocation_policy_register_new_page(alloc->allocPolicy, firstPage + i, initialFreespace);
     }

     TendNestedTopAction(xid, nta);
}
Example no. 8
static const LogEntry * stasis_log_impl_in_memory_read_entry(stasis_log_t* log,
                                                 lsn_t lsn) {
  stasis_log_impl_in_memory * impl = log->impl;
  DEBUG("lsn: %ld\n", lsn);
  readlock(impl->globalOffset_lock, 0);
  if(lsn >= impl->nextAvailableLSN) {
    unlock(impl->globalOffset_lock);
    return NULL;
  }
  if(!(lsn - impl->globalOffset >= 0 && lsn - impl->globalOffset < impl->bufferLen)) {
    unlock(impl->globalOffset_lock);
    return NULL;
  }
  LogEntry * ptr = impl->buffer[lsn - impl->globalOffset];
  unlock(impl->globalOffset_lock);
  assert(ptr);
  assert(ptr->LSN == lsn);

  DEBUG("lsn: %ld prevlsn: %ld\n", ptr->LSN, ptr->prevLSN);
  return ptr;
}
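
The two early returns above implement a window test: the requested lsn must already exist (below nextAvailableLSN) and must still sit inside the in-memory buffer, i.e. in [globalOffset, globalOffset + bufferLen). The same test as a single predicate, as a sketch (the helper name is illustrative; the fields are those of the impl struct used above):

static int lsn_in_window(stasis_log_impl_in_memory * impl, lsn_t lsn) {
  return lsn <  impl->nextAvailableLSN &&
         lsn >= impl->globalOffset &&
         lsn <  impl->globalOffset + impl->bufferLen;
}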
Example no. 9
dataTuple* dataPage::iterator::getnext() {
    len_t len;
    bool succ;
    if(dp == NULL) {
        return NULL;
    }
    // XXX hack: read latch the page that the record will live on.
    // This should be handled by a read_data_in_latch function, or
    // something... (a hypothetical sketch follows this example).
    Page * p = loadPage(dp->xid_, dp->calc_chunk_from_offset(read_offset_).page);
    readlock(p->rwlatch, 0);
    succ = dp->read_data((byte*)&len, read_offset_, sizeof(len));
    if((!succ) || (len == 0)) {
        unlock(p->rwlatch);
        releasePage(p);
        return NULL;
    }
    read_offset_ += sizeof(len);

    byte * buf = (byte*)malloc(len);
    succ = dp->read_data(buf, read_offset_, len);

    // release hacky latch
    unlock(p->rwlatch);
    releasePage(p);

    if(!succ) {
        read_offset_ -= sizeof(len);
        free(buf);
        return NULL;
    }

    read_offset_ += len;

    dataTuple *ret = dataTuple::from_bytes(buf);

    free(buf);

    return ret;
}
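
The "XXX hack" comment asks for a read_data_in_latch() helper. A hypothetical sketch of what it could look like, assuming dataPage exposes xid_, calc_chunk_from_offset(), and read_data() as used above; this is not the real API, and unlike getnext() it latches per call rather than holding one latch across both reads:

bool dataPage::read_data_in_latch(byte * buf, off_t offset, len_t len) {
    // Latch the page the first chunk lives on, exactly as getnext() does.
    Page * p = loadPage(xid_, calc_chunk_from_offset(offset).page);
    readlock(p->rwlatch, 0);
    bool succ = read_data(buf, offset, len);
    unlock(p->rwlatch);
    releasePage(p);
    return succ;
}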
Example no. 10
void * inner_worker_loop(void * arg_void) {

  WorkerLoopArgs * arg = arg_void;
  DfaSet * dfaSet = arg->dfaSet;
  const state_machine_id machine_id = arg->machine_id;

  int timeout = 0; /* Run through the loop immediately the first time around. */
  int state = 0;
  int first = 1;
  StateMachine* stateMachine;

  
  free(arg_void);

  readlock(dfaSet->lock, machine_id);

  stateMachine = getSmash(dfaSet->smash, machine_id);

  pthread_mutex_lock(stateMachine->mutex);
  
  while(1) {
    int rc = 0;

    state_name i, state_idx = NULL_STATE; 
    
    /** @todo inner worker loop doesn't seem to 'do the right thing' with respect to timing */
    if(1 || !stateMachine->pending) {  /* Sleep unconditionally for now; see the @todo above. */
      struct timeval now;
      struct timespec timeout_spec;

      pthread_cond_t * cond;
      pthread_mutex_t * mutex;
      
      long usec;

      cond = stateMachine->sleepCond;
      mutex = stateMachine->mutex;
      
      readunlock(dfaSet->lock);

      /* A note on locking: This loop maintains a read lock everywhere
	 except for this call to sleep, and upon termination when it
	 requires a write lock. */

      gettimeofday(&now, NULL);

      usec = now.tv_usec + timeout;

      if(usec > 1000000) {
	now.tv_sec++;
	usec-=1000000;
      }
      
      timeout_spec.tv_sec = now.tv_sec;
      timeout_spec.tv_nsec = 1000 * usec;
      

      rc = pthread_cond_timedwait(cond, mutex, &timeout_spec);

      if(rc == EINVAL) {
	perror("pthread");
      } 
      
      readlock(dfaSet->lock, machine_id);     
      
      /* Some other thread may have invalidated our pointer while we
	 were sleeping without a lock... no longer true, *but* since
	 our pointer is local to this thread, we still need to re-read
	 from the store.*/

      assert(stateMachine == getSmash(dfaSet->smash, machine_id));
    }

    DEBUG("Current State: %d, %d\n", stateMachine->current_state, NULL_STATE_TOMBSTONE);

    if(stateMachine->current_state == NULL_STATE_TOMBSTONE) {
      DEBUG("Freeing statemachine\n");
      break;
    }
    if(state != stateMachine->current_state) { first = 1; }
    state = stateMachine->current_state;
    stateMachine->message.type = stateMachine->current_state;
    timeout = 690000 + (int)(300000.0 * rand() / (RAND_MAX + 1.0));
    for(i = 0; i < dfaSet->state_count; i++) {
      if(dfaSet->states[i].name == stateMachine->current_state) {
	state_idx = i;
      }
    } 

    assert(state_idx != NULL_STATE);
    DEBUG("Worker loop for state machine: %ld still active\n", machine_id);

    int send = 1;
    if(dfaSet->states[state_idx].retry_fcn != NULL) {
      send = dfaSet->states[state_idx].retry_fcn(dfaSet, stateMachine, &(stateMachine->message), stateMachine->message_recipient);
    }
    if(send) {
      if(first) {
	first = 0;
      } else {
	printf("Resending message. Machine # %ld State # %d\n", stateMachine->machine_id, stateMachine->current_state);
      } 
      send_message(&(dfaSet->networkSetup), &(stateMachine->message), stateMachine->message_recipient);
    }

  }

  setSmash(dfaSet->smash, stateMachine->machine_id);
  pthread_mutex_unlock(stateMachine->mutex);
  readunlock(dfaSet->lock);
  return 0;
}
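
The deadline arithmetic in the sleep branch above turns a relative timeout in microseconds into the absolute timespec that pthread_cond_timedwait() expects. A standalone sketch of that conversion (the function name is illustrative):

#include <sys/time.h>
#include <time.h>

static void timeout_usec_to_abs(long timeout_usec, struct timespec * ts) {
  struct timeval now;
  gettimeofday(&now, NULL);
  long usec = now.tv_usec + timeout_usec;  /* may exceed one second */
  ts->tv_sec  = now.tv_sec + usec / 1000000;
  ts->tv_nsec = (usec % 1000000) * 1000;   /* timespec wants nanoseconds */
}

Unlike the single overflow check in the loop, the division form also handles timeouts longer than one second.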
Example no. 11
void Tdealloc(int xid, recordid rid) {
  stasis_alloc_t* alloc = stasis_runtime_alloc_state();

  // @todo this needs to garbage collect empty storage regions.

  pthread_mutex_lock(&alloc->mut);
  Page * p = loadPage(xid, rid.page);

  readlock(p->rwlatch,0);

  recordid newrid = stasis_record_dereference(xid, p, rid);
  stasis_allocation_policy_dealloced_from_page(alloc->allocPolicy, xid, newrid.page);

  int64_t size = stasis_record_length_read(xid,p,rid);
  int64_t type = stasis_record_type_read(xid,p,rid);

  if(type == NORMAL_SLOT) { type = size; }

  byte * preimage = malloc(sizeof(alloc_arg)+size);

  ((alloc_arg*)preimage)->slot = rid.slot;
  ((alloc_arg*)preimage)->type = type;

  // stasis_record_read() wants rid to have its raw size to prevent
  // code that doesn't know about record types from introducing memory
  // bugs.
  rid.size = size;
  stasis_record_read(xid, p, rid, preimage+sizeof(alloc_arg));
  // restore rid to valid state.
  rid.size = type;

  // Ok to release latch; page is still pinned (so no WAL problems).
  // allocationPolicy protects us from running out of space due to concurrent
  // xacts.

  // Also, there can be no reordering of allocations / deallocations,
  // since we're holding alloc->mut.  However, we might reorder a Tset()
  // and a Tdealloc() or Talloc() on the same page.  If this happens,
  // it's an unsafe race in the application, and not technically our problem.

  // @todo  Tupdate forces allocation to release a latch, leading to potentially nasty application bugs.  Perhaps this is the wrong API!

  // @todo application-level allocation races can lead to unrecoverable logs.
  unlock(p->rwlatch);

  Tupdate(xid, rid.page, preimage,
          sizeof(alloc_arg)+size, OPERATION_DEALLOC);

  releasePage(p);

  pthread_mutex_unlock(&alloc->mut);

  if(type==BLOB_SLOT) {
    stasis_blob_dealloc(xid,(blob_record_t*)(preimage+sizeof(alloc_arg)));
  }

  free(preimage);

  stasis_transaction_table_set_argument(alloc->xact_table, xid, alloc->callback_id,
					AT_COMMIT, alloc);

}
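
For reference, the DEALLOC log record assembled above has the following shape. The layout is read off the code; the recovery rationale is an inference, not something this excerpt states:

/* preimage                      preimage + sizeof(alloc_arg)
 * v                             v
 * +-----------------------------+----------------------------+
 * | alloc_arg { slot, type }    | raw record bytes (size)    |
 * +-----------------------------+----------------------------+
 *
 * Tupdate() logs sizeof(alloc_arg) + size bytes, presumably so the
 * OPERATION_DEALLOC undo can re-create the slot and restore its
 * contents during recovery. */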