Example #1
0
/* Exercise the ramdisk device: fill buf1 with a known pattern, write it out,
   seek back, read it into buf2, and verify the round trip byte for byte.
   Returns NULL on success. */
char *testrd() {
	int fd = open("/dev/rd", O_RDWR);
	int i;

	assert_ne("open", -1, fd);

	for (i = 0; i < sizeof(buf1); i++) {
		buf1[i] = (i + 1) % 256;
		buf2[i] = 0;
	}
/* End-of-device boundary checks around MAX_SEEK (currently disabled):
	assert_eq("lseek", MAX_SEEK - 1, lseek(fd, MAX_SEEK - 1, SEEK_SET));
	assert_eq("write", -1, write(fd, buf1, 2));
	assert_eq("write", 1, write(fd, buf1, 1));
	assert_eq("write", -1, write(fd, buf1, 1));
	assert_eq("lseek", MAX_SEEK - 1, lseek(fd, MAX_SEEK - 1, SEEK_SET));
	assert_eq("read", -1, read(fd, buf2, 2));
	assert_eq("read", 1, read(fd, buf2, 1));
	assert_eq("read", -1, read(fd, buf2, 1));
*/
	assert_eq("lseek", 0, lseek(fd, 0, SEEK_SET));
	assert_eq("write", sizeof(buf1), write(fd, buf1, sizeof(buf1)));
	assert_eq("lseek", 0, lseek(fd, 0, SEEK_SET));
	assert_eq("read", sizeof(buf2), read(fd, buf2, sizeof(buf2)));
	for (i = 0; i < sizeof(buf1); i++) {
		assert_eq("cmp", buf1[i], buf2[i]);
	}

	assert_eq("close", 0, close(fd));

	return NULL;
}
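The test above assumes scaffolding that is not shown: the buf1/buf2 buffers, a MAX_SEEK device size, and label-taking assert helpers, with NULL returned on success. A minimal sketch of what such a harness could look like follows; the buffer size, the MAX_SEEK value, and the macro bodies are assumptions rather than the original code, and in a real harness the macros would precede the test.

/* Hypothetical test scaffolding -- all names, sizes, and values are illustrative. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_SEEK 4096                 /* assumed ramdisk capacity in bytes */
static unsigned char buf1[1024], buf2[1024];

/* On mismatch, report the values and return the label as the failing
 * test's message; the test functions return NULL when everything passes. */
#define assert_eq(label, want, got) do { \
	long w_ = (long)(want), g_ = (long)(got); \
	if (w_ != g_) { \
		fprintf(stderr, "%s: expected %ld, got %ld\n", (label), w_, g_); \
		return (label); \
	} \
} while (0)

#define assert_ne(label, bad, got) do { \
	if ((long)(bad) == (long)(got)) { \
		fprintf(stderr, "%s: unexpected value %ld\n", (label), (long)(bad)); \
		return (label); \
	} \
} while (0)

char *testrd();                       /* the test defined above */

int main(void) {
	char *msg = testrd();
	if (msg != NULL) {
		fprintf(stderr, "FAILED: %s\n", msg);
		return 1;
	}
	printf("testrd passed\n");
	return 0;
}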
Example #2
0
// Mark taskid as the active exclusive task; only the output thread records timings.
void TimeTasks::start_main_task(TimeTasks::Tasks taskid)
{
  if(!is_output_thread()) return;
  assert(is_exclusive(taskid));
  assert_ne(active_task, taskid);
  active_task = taskid;
  assert(!active[taskid]);
  active[taskid]=true;
}
// Fold the time elapsed since t_start_communicate into the active task's
// communication total, then switch back to computation mode.
void TimeTasks::addto_communicate()
{
  if(!active_task) return;
  assert_eq(active_mode,COMMUNICATION);
  assert_ne(t_start_communicate,0.);
  communicate[active_task] += MPI_Wtime()-t_start_communicate;
  t_start_communicate = 0.;
  active_mode=COMPUTATION;
}
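Here assert_eq and assert_ne take just the two values being compared, with no label. The project defines its own versions; a plausible sketch of this two-argument form (also the shape used by the later examples) is a macro that prints the failing expressions with their location and aborts:

// Hypothetical two-argument assert macros -- not the project's actual definitions.
#include <cstdio>
#include <cstdlib>

#define assert_eq(a, b) do { \
    if (!((a) == (b))) { \
        std::fprintf(stderr, "%s:%d: assert_eq(%s, %s) failed\n", \
                     __FILE__, __LINE__, #a, #b); \
        std::abort(); \
    } \
} while (0)

#define assert_ne(a, b) do { \
    if ((a) == (b)) { \
        std::fprintf(stderr, "%s:%d: assert_ne(%s, %s) failed\n", \
                     __FILE__, __LINE__, #a, #b); \
        std::abort(); \
    } \
} while (0)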
Example #4
0
/* Remove and return the element at the front of the deque. */
obj dequeue_pop_front( obj deq )
{
  obj item, newf, oldf = DEQ_FRONT(deq);
  assert_ne(deq, "dequeue-pop-front!");
  newf = succ( deq, oldf );
  SET_DEQ_FRONT( deq, newf );
  item = gvec_ref( DEQ_STATE(deq), FXWORDS_TO_RIBYTES(oldf) );
  gvec_write_non_ptr( DEQ_STATE(deq), FXWORDS_TO_RIBYTES(oldf), FALSE_OBJ );
  return item;
}
Example #5
0
/* Remove and return the element at the back of the deque. */
obj dequeue_pop_back( obj deq )
{
  obj item, newb, oldb = DEQ_BACK(deq);
  assert_ne(deq, "dequeue-pop-back!");
  newb = pred( deq, oldb );
  SET_DEQ_BACK( deq, newb );
  item = gvec_ref( DEQ_STATE(deq), FXWORDS_TO_RIBYTES(newb) );
  /* clear the now-unused position in case there is a GC liveness issue
   * (i.e., I was seeing annoying latencies finalizing threads in the
   * new threads system)
   */
  gvec_write_non_ptr( DEQ_STATE(deq), FXWORDS_TO_RIBYTES(newb), FALSE_OBJ );
  return item;
}
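Both pop operations rely on conventions that are only implied: succ/pred advance an index with wrap-around over the deque's backing vector, DEQ_FRONT indexes the first element while DEQ_BACK indexes one past the last (which is why pop_front reads at the old front and pop_back reads at the new, decremented back), and assert_ne appears to check that the deque is non-empty, with the string used as the operation name in the error report. A stand-alone sketch of the same discipline over a plain C array (illustrative only, not RScheme's gvec-based implementation):

/* Hypothetical fixed-capacity deque using the same front/back convention. */
#include <assert.h>
#include <stddef.h>

#define DEQ_CAP 8

typedef struct {
    void  *slot[DEQ_CAP];   /* backing storage */
    size_t front;           /* index of the first element */
    size_t back;            /* index one past the last element */
} deq_t;

static size_t deq_succ(size_t i) { return (i + 1) % DEQ_CAP; }
static size_t deq_pred(size_t i) { return (i + DEQ_CAP - 1) % DEQ_CAP; }

static void *deq_pop_front(deq_t *d) {
    assert(d->front != d->back);        /* non-empty, cf. assert_ne above */
    void *item = d->slot[d->front];     /* element lives at the old front */
    d->slot[d->front] = NULL;           /* clear the slot (GC-liveness analogue) */
    d->front = deq_succ(d->front);
    return item;
}

static void *deq_pop_back(deq_t *d) {
    assert(d->front != d->back);
    size_t newb = deq_pred(d->back);    /* element lives at the new back */
    void *item = d->slot[newb];
    d->slot[newb] = NULL;
    d->back = newb;
    return item;
}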
Example #6
0
boost::optional<utils::net::NetworkPath> buildRemote(const utils::VirtualPrintable& source, const Host& targetHost, const utils::compiler::Compiler& compiler) {
	// create a temporary local source file
	char sourceFile[] = P_tmpdir "/srcXXXXXX";
	int src = mkstemp(sourceFile);
	assert_ne(src, -1);
	close(src); // descriptor not needed; the file is rewritten via an fstream below

	// write source to file
	std::fstream srcFile(sourceFile, std::fstream::out);
	srcFile << source << "\n";
	srcFile.close();

	// build remotely
	auto res = buildRemote(toVector(nfs::NetworkPath(sourceFile)), "binary", targetHost, compiler);

	// delete source file
	if(boost::filesystem::exists(sourceFile)) { boost::filesystem::remove(sourceFile); }
	return res;
}
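The single assertion here guards mkstemp: a return of -1 means no temporary file could be created, and everything afterwards would operate on a meaningless path. The returned descriptor is closed immediately because the contents are then written through a std::fstream. The same pattern in isolation (path and contents are illustrative):

#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <unistd.h>

int main() {
	// mkstemp replaces the XXXXXX suffix in place and returns an open descriptor.
	char path[] = "/tmp/srcXXXXXX";
	int fd = mkstemp(path);
	if (fd == -1) {                 // the helper above uses assert_ne(src, -1)
		std::perror("mkstemp");
		return 1;
	}
	close(fd);                      // descriptor unused; rewrite via a stream

	std::ofstream out(path);
	out << "int main() { return 0; }\n";   // stand-in for the printed source
	out.close();

	// ... hand `path` to the compiler / remote build here ...

	std::remove(path);              // clean up the temporary file
	return 0;
}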
Example #7
0
rmc_noinline
optional<T> MSQueue<T>::dequeue() {
    // Core message passing: reading the data out of the node comes
    // after getting the pointer to it.
    XEDGE(get_next, node_use);
    // Make sure we see at least head's init
    XEDGE(get_head, get_next);
    // XXX: another part of maintaining the awful head != tail ->
    // next != null invariant that causes like a billion constraints.
    // Think about it a bit more to make sure this is right.
    // This is awful, so many barriers.
    XEDGE(get_head, get_tail);
    // If we see an updated tail (so that head != tail), make sure that
    // we see update to head->next.
    XEDGE(get_tail, get_next);

    // Need to make sure anything visible through the next pointer
    // stays visible when it gets republished at the head or tail
    VEDGE(get_next, catchup_swing);
    VEDGE(get_next, dequeue);

    // Make sure the read out of next executes before the verification
    // that it read from a sensible place:
    XEDGE(get_next, verify_head);


    NodePtr head, tail, next;

    universal data;

    for (;;) {
        head = L(get_head, this->head_);
        tail = L(get_tail, this->tail_); // XXX: really?
        next = L(get_next, head->next_);

        // Consistency check; see note above
        if (head != L(verify_head, this->head_)) continue;

        // Check if the queue *might* be empty
        // XXX: is it necessary to have the empty check under this
        if (head == tail) {
            // Ok, so, the queue might be empty, but it also might
            // be that the tail pointer has just fallen behind.
            // If the next pointer is null, then it is actually empty
            if (next == nullptr) {
                return optional<T> {};
            } else {
                // not empty: tail falling behind; since it is super
                // not ok for the head to advance past the tail,
                // try advancing the tail
                // XXX weak v strong?
                L(catchup_swing,
                  this->tail_.compare_exchange_strong_gen(tail, next));
            }
        } else {
            // OK, now we try to actually read the thing out.
            assert_ne(next.ptr(), nullptr);

            // We need to read the data out of the node
            // *before* we try to dequeue it or else it could get
            // reused before we read it out.
            data = L(node_use, next->data_);
            if (L(dequeue, this->head_.compare_exchange_weak_gen(head, next))) {
                break;
            }
        }
    }

    LPOST(node_use);

    // OK, everything set up.
    // head can be freed
    MSQueueNode::freelist.unlinked(head);
    optional<T> ret(data.extract<T>());

    return ret;
}
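The L(...), XEDGE(...), and VEDGE(...) forms appear to be RMC annotations: L labels an action, XEDGE constrains execution order between two labeled actions, and VEDGE constrains visibility order. Beyond that, the dequeue assumes a freelist that recycles nodes (hence the concern about a node being reused before its data is read out) and a NodePtr that carries a generation count, so the *_gen compare-exchanges fail if the same address has been recycled in the meantime (the classic ABA problem). A rough sketch of such a generation-counted pointer, independent of the RMC machinery and with assumed names and layout:

// Hypothetical generation-counted pointer: a CAS only succeeds when both the
// pointer and its generation match, and the generation advances on every
// successful swap, so a recycled node at the same address no longer compares
// equal. The queue's real NodePtr type may differ in detail.
#include <atomic>
#include <cstdint>

template <typename T>
struct CountedPtr {
    T        *ptr;
    uint64_t  gen;
    bool operator==(const CountedPtr &o) const { return ptr == o.ptr && gen == o.gen; }
    bool operator!=(const CountedPtr &o) const { return !(*this == o); }
};

template <typename T>
bool compare_exchange_gen(std::atomic<CountedPtr<T>> &a,
                          CountedPtr<T> &expected, T *desired) {
    // The replacement keeps the new pointer but bumps the generation.
    CountedPtr<T> next{desired, expected.gen + 1};
    return a.compare_exchange_strong(expected, next);
}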
Example #8
0
    void slowpathLock(Node::Ptr oldTail) {
        Node me;
        Node::Ptr curTail;
        bool newThreads;

        // Step one, put ourselves at the back of the queue
        for (;;) {
            Node::Ptr newTail = Node::Ptr(&me, oldTail.tag());

            // Enqueue ourselves...
            if (tail_.compare_exchange_strong(oldTail, newTail,
                                              std::memory_order_acq_rel,
                                              std::memory_order_relaxed)) break;

            // OK, maybe the whole thing is just unlocked now?
            if (oldTail == Node::Ptr(nullptr, 0)) {
                // If so, try the top level lock
                if (tail_.compare_exchange_strong(oldTail,
                                                  Node::Ptr(nullptr, 1),
                                                  std::memory_order_acquire,
                                                  std::memory_order_relaxed))
                    goto out;
            }
        }

        // Step two: OK, there is an actual queue, so link up with the old
        // tail and wait until we are at the head of the queue
        if (oldTail.ptr()) {
            // * Writing into the oldTail is safe because threads can't
            //   leave unless there is no thread after them or they have
            //   marked the next ready
            oldTail->next.store(&me, std::memory_order_release);

            while (!me.ready.load(std::memory_order_acquire)) delay();
        }

        // Step three: wait until the lock is freed
        while ((curTail = tail_.load(std::memory_order_relaxed)).tag()) {
            delay();
        }

        // Step four: take the lock
        for (;;) {
            assert_eq(curTail.tag(), 0);
            assert_ne(curTail.ptr(), nullptr);

            newThreads = curTail.ptr() != &me;

            // If there aren't any waiters after us, the queue is
            // empty. Otherwise, keep the old tail.
            Node *newTailP = newThreads ? curTail : nullptr;
            Node::Ptr newTail = Node::Ptr(newTailP, 1);

            if (tail_.compare_exchange_strong(curTail, newTail,
                                              std::memory_order_acquire,
                                              std::memory_order_relaxed)) break;
        }

        // Step five: now that we have the lock, if any threads came
        // in after us, indicate to the next one that it is at the
        // head of the queue
        if (newThreads) {
            // Next thread might not have written itself in, yet,
            // so we have to wait.
            Node *next;
            while (!(next = me.next.load(std::memory_order_acquire))) delay();
            next->ready.store(true, std::memory_order_release);
        }
    out:
        return;
    }
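Both versions of slowpathLock (this one and the RMC version that follows) manipulate Node::Ptr values that pack a node pointer together with a one-bit tag, the lock bit, so a single compare_exchange on tail_ can observe or change both at once. The real type lives in the surrounding codebase; a sketch of the usual low-bit packing it suggests:

// Hypothetical Node::Ptr-style tagged pointer: the tag occupies the low bit,
// which is zero for suitably aligned Node objects, so pointer and tag share
// one word. Names and details are illustrative, not the original class.
#include <cstdint>

template <typename T>
class TaggedPtr {
    uintptr_t bits_;
public:
    TaggedPtr(T *p = nullptr, uintptr_t tag = 0)
        : bits_(reinterpret_cast<uintptr_t>(p) | (tag & 1)) {}

    T *ptr() const        { return reinterpret_cast<T *>(bits_ & ~uintptr_t(1)); }
    uintptr_t tag() const { return bits_ & 1; }

    T *operator->() const { return ptr(); }
    operator T *()  const { return ptr(); }   // lets a Ptr convert back to T*

    friend bool operator==(TaggedPtr a, TaggedPtr b) { return a.bits_ == b.bits_; }
    friend bool operator!=(TaggedPtr a, TaggedPtr b) { return a.bits_ != b.bits_; }
};

In the lock, a tag of 1 on tail_ means the lock is held while the pointer part tracks the most recently queued waiter; keeping both in one word is what lets a single compare_exchange take the lock and update the queue together.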
Example #9
0
    void slowpathLock(Node::Ptr oldTail) {
        // makes sure that init of me.next is prior to tail_link in
        // other thread
        VEDGE(node_init, enqueue);
        // init of me needs to be done before publishing it to
        // previous thread also
        VEDGE(node_init, tail_link);
        // Can't write self into previous node until previous node inited
        XEDGE(enqueue, tail_link);

        LS(node_init, Node me);
        Node::Ptr curTail;
        bool newThreads;

        // Step one, put ourselves at the back of the queue
        for (;;) {
            Node::Ptr newTail = Node::Ptr(&me, oldTail.tag());

            // Enqueue ourselves...
            if (L(enqueue,
                  tail_.compare_exchange_strong(oldTail, newTail))) break;

            // OK, maybe the whole thing is just unlocked now?
            if (oldTail == Node::Ptr(nullptr, 0)) {
                // If so, try the top level lock
                if (tail_.compare_exchange_strong(oldTail,
                                                  Node::Ptr(nullptr, 1)))
                    goto out;
            }

            delay();
        }

        // Need to make sure not to compete for the lock before the
        // right time. This makes sure the ordering doesn't get messed
        // up.
        XEDGE(ready_wait, lock);

        // Step two: OK, there is an actual queue, so link up with the old
        // tail and wait until we are at the head of the queue
        if (oldTail.ptr()) {
            // * Writing into the oldTail is safe because threads can't
            //   leave unless there is no thread after them or they have
            //   marked the next ready
            L(tail_link, oldTail->next = &me);

            while (!L(ready_wait, me.ready)) delay();
        }

        // Step three: wait until the lock is freed
        // We don't need a constraint from this load; "lock" serves
        // to handle this just fine: lock can't succeed until we've
        // read an unlocked tail_.
        while ((curTail = tail_).tag()) {
            delay();
        }

        // Our lock acquisition needs to be finished before we give the
        // next thread a chance to try to acquire the lock or it could
        // compete with us for it, causing trouble.
        VEDGE(lock, signal_next);

        // Step four: take the lock
        for (;;) {
            assert_eq(curTail.tag(), 0);
            assert_ne(curTail.ptr(), nullptr);

            newThreads = curTail.ptr() != &me;

            // If there aren't any waiters after us, the queue is
            // empty. Otherwise, keep the old tail.
            Node *newTailP = newThreads ? curTail : nullptr;
            Node::Ptr newTail = Node::Ptr(newTailP, 1);

            if (L(lock, tail_.compare_exchange_strong(curTail, newTail))) break;
        }

        // Step five: now that we have the lock, if any threads came
        // in after us, indicate to the next one that it is at the
        // head of the queue
        if (newThreads) {
            // Next thread might not have written itself in, yet,
            // so we have to wait.
            Node *next;
            XEDGE(load_next, signal_next);
            while (!L(load_next, next = me.next)) delay();
            L(signal_next, next->ready = true);
        }

        //printf("full slowpath out\n");
    out:
        //printf("made it out of slowpath!\n");
        return;
    }