Example #1
static PyObject*
SemaphoreObject_acquire(SemaphoreObject *self, PyObject *args, PyObject *kwargs)
{
    PyObject *timeout = NULL;
    PyObject *blocking = Py_True;
    long seconds = 0;

    static char *keywords[] = {"blocking", "timeout", NULL};
    
    DEBUG("self:%p", self);

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:acquire", keywords, &blocking, &timeout)) {
        return NULL;
    }

    if (timeout == NULL) {
        return semaphore_acquire(self, blocking, seconds);
    } else if (timeout == Py_None) {
        return semaphore_acquire(self, blocking, seconds);
    } else if (PyLong_Check(timeout)) {
        seconds = PyLong_AsLong(timeout);
        if (seconds < 0) {
            PyErr_SetString(PyExc_ValueError, "timeout value out of range");
            return NULL;
        }
        return semaphore_acquire(self, blocking, seconds);
    }

    PyErr_SetString(PyExc_TypeError, "an integer is required");
    return NULL;
}
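The method above normalises its optional blocking/timeout Python arguments to a C long before delegating to the internal semaphore_acquire helper. Below is a minimal, self-contained sketch of the same parsing idiom (the method name and the placeholder return value are hypothetical, not part of the project above); it also adds the PyErr_Occurred() check that guards against PyLong_AsLong overflow:

#include <Python.h>

/* Hedged sketch: parse an optional integer "timeout" keyword argument the
 * way SemaphoreObject_acquire does. Everything except the Python C-API
 * calls is hypothetical. */
static PyObject *
example_wait(PyObject *self, PyObject *args, PyObject *kwargs)
{
    PyObject *timeout = NULL;
    long seconds = 0;
    static char *keywords[] = {"timeout", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:wait", keywords, &timeout)) {
        return NULL;
    }

    if (timeout != NULL && timeout != Py_None) {
        if (!PyLong_Check(timeout)) {
            PyErr_SetString(PyExc_TypeError, "an integer is required");
            return NULL;
        }
        seconds = PyLong_AsLong(timeout);
        if (seconds == -1 && PyErr_Occurred()) {
            return NULL;                       /* conversion overflowed */
        }
        if (seconds < 0) {
            PyErr_SetString(PyExc_ValueError, "timeout value out of range");
            return NULL;
        }
    }

    /* A real implementation would now call into the waiting primitive;
     * returning the parsed value keeps the sketch self-contained. */
    return PyLong_FromLong(seconds);
}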
Example #2
static PyObject*
rlock_acquire(RLockObject *self, PyObject *blocking, long timeout)
{
    PyObject *current, *res;
    int ret;


    DEBUG("self:%p", self);

    current = greenlet_getcurrent();
    Py_XDECREF(current);
    if (current == NULL) {
        return NULL;
    }

    if (self->owner == current) {
        self->count++;
        Py_RETURN_TRUE;
    }

    res = semaphore_acquire((SemaphoreObject*)self->block, blocking, timeout);
    if (res == NULL) {
        return NULL;
    }
    ret = PyObject_IsTrue(res);
    if (ret == -1) {
        return NULL;
    }
    if (ret) {
        self->owner = current;
        Py_INCREF(self->owner);
        self->count = 1;
    }
    return res;
}
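The acquire path above only touches the underlying semaphore when the calling greenlet is not already the owner; re-entrant acquisitions just bump count. For symmetry, a release would decrement count and hand the semaphore back once it reaches zero. The following is only a hedged sketch built from the fields shown above; semaphore_release is a hypothetical counterpart of semaphore_acquire, not a function taken from the project:

/* Hedged sketch of the release side of the reentrant lock in Example #2.
 * The RLockObject fields (owner, count, block) are the ones used above;
 * semaphore_release() is a hypothetical counterpart of semaphore_acquire(). */
static PyObject*
rlock_release_sketch(RLockObject *self)
{
    PyObject *current = greenlet_getcurrent();
    Py_XDECREF(current);                 /* borrowed, as in Example #2 */
    if (current == NULL) {
        return NULL;
    }

    if (self->owner != current || self->count == 0) {
        PyErr_SetString(PyExc_RuntimeError, "cannot release un-acquired lock");
        return NULL;
    }

    if (--self->count == 0) {
        Py_CLEAR(self->owner);
        return semaphore_release((SemaphoreObject*)self->block);   /* hypothetical */
    }
    Py_RETURN_NONE;
}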
Example #3
static PyObject*
rlock_acquire_restore(RLockObject *self, PyObject *args)
{
    PyObject *res, *count, *owner, *state = NULL;
    long cnt;
     
    DEBUG("self:%p", self);
    if (!PyArg_ParseTuple(args,  "O:_acquire_restore", &state)) {
        return NULL;
    }

    res = semaphore_acquire((SemaphoreObject*)self->block, Py_True, 0);
    Py_XDECREF(res);
    if (res == NULL) {
        return NULL;
    }
    
    /* state is the (count, owner) pair produced by _release_save */
    count = PyTuple_GET_ITEM(state, 0);
    cnt = PyLong_AS_LONG(count);

    owner = PyTuple_GET_ITEM(state, 1);

    self->count = cnt;
    Py_CLEAR(self->owner);
    self->owner = owner;
    Py_INCREF(self->owner);

    Py_RETURN_NONE;
}
Example #4
uintptr_t
semaphore_acquire(semaphore_t semaphore, ips_node_t node)
{
    uintptr_t result;
    if (node == NULL)
    {
        ips_node_s node;
        result = semaphore_acquire(semaphore, &node);
        if (!result)
            ips_wait(&node);

        return result;
    }
    else
    {
        int irq = __irq_save();
        spinlock_acquire(&semaphore->lock);
        result = semaphore->count;
        if (result > 0)
        {
            if (-- semaphore->count == 0)
                SEMAPHORE_WAIT_CLEAR(semaphore);
          
            spinlock_release(&semaphore->lock);
            __irq_restore(irq);
               
            IPS_NODE_WAIT_CLEAR(node);
          
            return result;
        }
        else
        {
            ips_wait_init(node, current);

            if (SEMAPHORE_WAIT(semaphore))
            {
                node->next = SEMAPHORE_PTR(semaphore);
                node->prev = node->next->prev;
                node->next->prev = node;
                node->prev->next = node;
            }
            else
            {
                SEMAPHORE_WAIT_SET(semaphore);
                node->next = node->prev = node;
                SEMAPHORE_PTR_SET(semaphore, node);
            }

            spinlock_release(&semaphore->lock);
            __irq_restore(irq);
               
            return result;
        }         
    }
}
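The kernel-level variant above has two modes: with node == NULL it allocates a wait node on its own stack and blocks in ips_wait until a token is handed over; with a caller-supplied node it either takes a token immediately or enqueues the node on the semaphore's wait list and returns 0. A hedged caller sketch (the wrapper function itself is hypothetical):

/* Hedged usage sketch for the kernel semaphore of Example #4. Only
 * semaphore_acquire(), ips_wait() and the ips_node_s type come from the
 * example above; the wrapper function is hypothetical. */
void consumer_take_token(semaphore_t sem)
{
    ips_node_s node;

    /* Simple blocking form: passing NULL makes the callee allocate its
     * own stack node and call ips_wait() itself. */
    semaphore_acquire(sem, NULL);

    /* Explicit form: supply a node; a zero result means no token was
     * available and the node was enqueued on the wait list. */
    if (!semaphore_acquire(sem, &node))
        ips_wait(&node);    /* block until a release hands us the token */
}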
Example #5
/** mailbox_allocate
 *      Description: allocates space for the mailbox in both directions
 */
status_t
dnp_mailbox_allocate(uint32_t channel_idx)
{

    dnp_mailbox_t *mb_in, *mb_out;
    status_t ret_val;

  watch (status_t)
  {
    mb_in =  (dnp_mailbox_t *)kernel_malloc(sizeof(dnp_mailbox_t),1); 
    mb_in -> status = 0;
    mb_in -> nr = mb_in ->nw = 0;
    ret_val = semaphore_create("mailbox_in", 0, &mb_in->sem);
    check (sem_error, ret_val == DNA_OK, DNA_ERROR);

    mb_out =  (dnp_mailbox_t *)kernel_malloc(sizeof(dnp_mailbox_t),1); 
    mb_out -> nr = mb_out ->nw = 0;
    mb_out ->status = 0;
    ret_val = semaphore_create("mailbox_out", 0, &mb_out->sem);
    check (sem_error, ret_val == DNA_OK, DNA_ERROR);

    /* Drain the semaphores so both start with a count of zero */
    while(semaphore_acquire(mb_in->sem, 1, DNA_RELATIVE_TIMEOUT, 0) 
        == DNA_OK);
    while(semaphore_acquire(mb_out->sem, 1, DNA_RELATIVE_TIMEOUT, 0) 
        == DNA_OK);

    dnp_mailboxes[2*channel_idx] = mb_in;
    dnp_mailboxes[2*channel_idx+1] = mb_out;

    return DNA_OK;
  }
  rescue (sem_error)
  {
    EMSG("Failed: no sem initialized");
    leave;
  }

}
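Note the while (semaphore_acquire(..., DNA_RELATIVE_TIMEOUT, 0) == DNA_OK); loops: acquiring with a zero relative timeout until the call fails drains any tokens the semaphore might hold, so it ends up at a well-defined count. Examples #14 and #21 use the same idiom and then re-seed the count with semaphore_release. A hedged helper factoring the idiom out (the helper name and the int32_t handle type are assumptions):

/* Hedged sketch of the "drain, then reseed" idiom from Examples #5, #14
 * and #21. semaphore_acquire/semaphore_release and DNA_RELATIVE_TIMEOUT
 * are used as in those examples; the helper itself is hypothetical. */
static void
semaphore_reset(int32_t sem, int32_t initial_count)
{
  /* Take tokens with a zero relative timeout until none are left. */
  while (semaphore_acquire(sem, 1, DNA_RELATIVE_TIMEOUT, 0) == DNA_OK)
    ;

  /* Re-seed the semaphore with the desired initial count. */
  if (initial_count > 0)
    semaphore_release(sem, initial_count, 0);
}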
Example #6
static int message_pump_start(void) {
	Semaphore handshake;

	log_debug("Starting message pump");

	semaphore_create(&handshake);

	thread_create(&_message_pump_thread, message_pump_thread_proc, &handshake);

	semaphore_acquire(&handshake);
	semaphore_destroy(&handshake);

	return _message_pump_hwnd == NULL ? -1 : 0;
}
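This is the handshake idiom used throughout these examples (#6, #9, #19, #20): the starter creates a semaphore, hands a pointer to it to the new thread, and blocks in semaphore_acquire until the thread signals that its setup is finished. A hedged sketch of the thread-side half (the setup steps are hypothetical):

/* Hedged sketch of the thread-side half of the handshake in Example #6.
 * Only Semaphore and semaphore_release() are taken from the surrounding
 * examples; the setup steps are placeholders. */
static void message_pump_thread_proc(void *opaque) {
	Semaphore *handshake = opaque;

	/* ... perform the setup the starter is waiting for, e.g. create the
	 * hidden window and store its handle in _message_pump_hwnd ... */

	semaphore_release(handshake);   /* unblock message_pump_start() */

	/* ... run the actual message loop ... */
}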
Example #7
status_t pc_tty_read (void * handler, void * destination,
    int64_t offset __attribute__((unused)), int32_t * p_count)
{
  pc_tty_t * tty = (pc_tty_t *) handler;

  if (tty -> buffer . empty) semaphore_acquire (tty -> sem_id, 1, 0, -1);

  *((char *)destination) = tty -> buffer . data;
  tty -> buffer . empty = true;

  int32_t one = 1;
  pc_tty_write(handler, &tty -> buffer . data, 0, &one); 

  *p_count = 1;
  return DNA_OK;
}
Example #8
/** mailbox_pop_mail
 *      Description: pops a mail from the indicated DNP event mailbox.
 *      Verify that the mailbox is not empty before popping.
 */
dnp_status_t
dnp_mailbox_pop_mail(uint32_t virt_channel_id, dnp_event_t *event,
		     dnp_mailbox_direction_t dir, int blocking)
{
  dnp_mailbox_t *mb = NULL;
  dnp_event_t *slot;
  uint32_t channel_idx = dnp_channels_virt_to_dev[virt_channel_id];
  status_t res;
  int32_t flags = 0;

  if(channel_idx >= dnp_mailbox_nentries){   
    DMSG("[pop_] WARNING: channel %u(%u) has no mailbox\r\n",
	 virt_channel_id, channel_idx);
    return DNP_MAIL_ERROR;
  }
  mb = dnp_mailboxes[2*channel_idx + dir];
  
  if(!blocking)
    flags = DNA_RELATIVE_TIMEOUT;

  res = semaphore_acquire(mb->sem, 1, flags, 0);

  if(res != DNA_OK && !blocking){
    return DNP_NO_MAIL;
  }

  if(res != DNA_OK && blocking){
    EMSG("+++ got an error with semaphore : %x\n", res);
    return DNP_MAIL_ERROR;
  }

  if (!mb->status) return DNP_NO_MAIL;
  slot = &mb->mail[mb -> nr];
  
  *event = *slot;
  
  mb->status --;
  mb->nr = (mb -> nr + 1) %  DNP_MAILBOX_SIZE;
  
  return DNP_SUCCESS;
}
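The pop side above blocks on (or, in non-blocking mode, polls) the mailbox semaphore before consuming a slot, so the semaphore count mirrors the number of mails in the box. A hedged sketch of the matching push side, built only from the fields visible in Examples #5 and #8 (the function itself is hypothetical):

/* Hedged sketch (hypothetical) of the push counterpart to Example #8:
 * store a mail in the write slot, then release one token so a blocked
 * dnp_mailbox_pop_mail() can proceed. */
dnp_status_t
dnp_mailbox_push_mail_sketch(dnp_mailbox_t *mb, dnp_event_t *event)
{
  if (mb->status >= DNP_MAILBOX_SIZE)
    return DNP_MAIL_ERROR;                 /* mailbox full */

  mb->mail[mb->nw] = *event;               /* copy into the write slot */
  mb->nw = (mb->nw + 1) % DNP_MAILBOX_SIZE;
  mb->status++;

  semaphore_release(mb->sem, 1, 0);        /* wake one waiting reader */
  return DNP_SUCCESS;
}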
Example #9
static int message_pump_start(void) {
	Semaphore handshake;

	log_debug("Starting message pump thread");

	semaphore_create(&handshake);

	thread_create(&_message_pump_thread, message_pump_thread_proc, &handshake);

	semaphore_acquire(&handshake);
	semaphore_destroy(&handshake);

	if (!_message_pump_running) {
		thread_destroy(&_message_pump_thread);

		log_error("Could not start message pump thread");

		return -1;
	}

	return 0;
}
Example #10
void mutex_acquire(mutex mutex)
    //@ requires [?f]mutex(mutex, ?space, ?termScope, ?fs, ?level, ?inv) &*& obspace_obligation_set(space, ?obs) &*& level_all_above(obs, level) == true;
    //@ ensures mutex_held(mutex, space, termScope, fs, level, inv, f) &*& obspace_obligation_set(space, cons(level, obs)) &*& inv();
    //@ terminates;
{
    //@ open [f]mutex(mutex, space, termScope, fs, level, inv);
    //@ semaphore s = mutex->semaphore;
    //@ int blockeesId = mutex->blockeesId;
    //@ open [f*fs]obligation_space0(space, termScope);
    //@ assert [_]ghost_cell<pair<int, real> >(space, pair(?scope, ?olevel));
    //@ open obspace_obligation_set(space, obs);
    {
        /*@
        predicate sep() = [f*fs]atomic_space(olevel, obligation_space_inv(scope, termScope));
        predicate unsep(int items, int blockees) =
            [fs]term_perm(termScope, false) &*&
            [f*fs]atomic_space(olevel, obligation_space_inv(scope, termScope)) &*&
            ghost_list<real>(blockeesId, ?blockeeFracs) &*& length(blockeeFracs) == blockees &*&
            [1/2]mutex->freeTokenFrac |-> ?ftf &*&
            items == 0 ?
                [ftf]mutex->freeToken |-> _ &*&
                [1 - real_sum(blockeeFracs)]obligation(scope, level)
            :
                [1/2]mutex->freeTokenFrac |-> _ &*& mutex->ownerToken |-> _ &*& items == 1 &*& inv();
        predicate P() = obligation_set(scope, obs) &*& [f]mutex->freeToken |-> _;
        predicate blocked() = ghost_list_member_handle<real>(blockeesId, ?frac) &*& obligation_set_calling(scope, obs, frac, level) &*& [f]mutex->freeToken |-> _;
        predicate Q() = obligation_set(scope, cons(level, obs)) &*& mutex->ownerToken |-> _ &*& inv() &*& [1/2]mutex->freeTokenFrac |-> f;
        lemma void sep()
            requires mutex_inv(mutex, termScope, fs, s, scope, level, inv, blockeesId)() &*& sep();
            ensures semaphore(s, ?items, ?blockees) &*& unsep(items, blockees);
        {
            open mutex_inv(mutex, termScope, fs, s, scope, level, inv, blockeesId)();
            open sep();

            assert semaphore(s, ?items, ?blockees);
            close unsep(items, blockees);
        }
        lemma void unsep()
            requires semaphore(s, ?items, ?blockees) &*& unsep(items, blockees);
            ensures mutex_inv(mutex, termScope, fs, s, scope, level, inv, blockeesId)() &*& sep();
        {
            open unsep(items, blockees);

            close mutex_inv(mutex, termScope, fs, s, scope, level, inv, blockeesId)();
            close sep();
        }
        lemma void block()
            requires atomic_space_level(olevel + 1) &*& unsep(0, ?blockees) &*& P();
            ensures atomic_space_level(olevel + 1) &*& unsep(0, blockees + 1) &*& blocked() &*& stop_perm(termScope);
        {
            open unsep(_, _);
            open P();

            assert ghost_list<real>(blockeesId, ?blockeeFracs);
            real fo = (1 - real_sum(blockeeFracs)) / 2;
            {
                predicate P1() = obligation_set(scope, obs) &*& [fo]obligation(scope, level);
                predicate Q1() = obligation_set_calling(scope, obs, fo, level) &*& stop_perm(termScope);
                lemma void body()
                    requires obligation_space_inv(scope, termScope)() &*& P1();
                    ensures obligation_space_inv(scope, termScope)() &*& Q1();
                {
                    open obligation_space_inv(scope, termScope)();
                    open P1();

                    call_obligation();

                    close Q1();
                    close obligation_space_inv(scope, termScope)();
                }
                produce_lemma_function_pointer_chunk(body) : atomic_noop_body(olevel, obligation_space_inv(scope, termScope), P1, Q1)() { call(); } {
                    close P1();
                    atomic_noop_nested();
                    open Q1();
                }
            }
            ghost_list_insert<real>(blockeesId, nil, blockeeFracs, (1 - real_sum(blockeeFracs)) / 2);

            close unsep(0, blockees + 1);
            close blocked();
        }
        lemma void unblock()
            requires atomic_space_level(olevel + 1) &*& unsep(0, ?blockees) &*& 0 < blockees &*& blocked() &*& stop_perm(termScope);
            ensures atomic_space_level(olevel + 1) &*& unsep(0, blockees - 1) &*& P();
        {
            open unsep(_, _);
            open blocked();

            assert ghost_list_member_handle<real>(blockeesId, ?frac);
            assert ghost_list<real>(blockeesId, ?blockeeFracs);
            ghost_list_match<real>();
            mem_remove_eq_append(frac, blockeeFracs);
            open exists(pair(?fs1, ?fs2));
            ghost_list_remove(blockeesId, fs1, fs2, frac);
            real_sum_append(fs1, cons(frac, fs2));
            real_sum_append(fs1, fs2);
            {
                predicate P1() = obligation_set_calling(scope, obs, frac, level) &*& stop_perm(termScope);
                predicate Q1() = obligation_set(scope, obs) &*& [frac]obligation(scope, level);
                lemma void body()
                    requires obligation_space_inv(scope, termScope)() &*& P1();
                    ensures obligation_space_inv(scope, termScope)() &*& Q1();
                {
                    open obligation_space_inv(scope, termScope)();
                    open P1();

                    return_obligation();

                    close Q1();
                    close obligation_space_inv(scope, termScope)();
                }
                produce_lemma_function_pointer_chunk(body) : atomic_noop_body(olevel, obligation_space_inv(scope, termScope), P1, Q1)() { call(); } {
                    close P1();
                    atomic_noop_nested();
                    open Q1();
                }
            }

            close unsep(0, blockees - 1);
            close P();
        }
        lemma void success()
            requires atomic_space_level(olevel + 1) &*& unsep(?items, 0) &*& 0 < items &*& P();
            ensures atomic_space_level(olevel + 1) &*& unsep(items - 1, 0) &*& Q();
        {
            open unsep(_, _);
            open P();

            {
                predicate P1() = obligation_set(scope, obs);
                predicate Q1() = obligation_set(scope, cons(level, obs)) &*& obligation(scope, level);
                lemma void body()
                    requires obligation_space_inv(scope, termScope)() &*& P1();
                    ensures obligation_space_inv(scope, termScope)() &*& Q1();
                {
                    open obligation_space_inv(scope, termScope)();
                    open P1();

                    create_obligation(level);

                    close Q1();
                    close obligation_space_inv(scope, termScope)();
                }
                produce_lemma_function_pointer_chunk(body) : atomic_noop_body(olevel, obligation_space_inv(scope, termScope), P1, Q1)() { call(); } {
                    close P1();
                    atomic_noop_nested();
                    open Q1();
                }
            }

            assert ghost_list<real>(blockeesId, ?blockeeFracs);
            switch (blockeeFracs) { case nil: case cons(h, t): }
            mutex->freeTokenFrac = f;

            close unsep(items - 1, 0);
            close Q();
        }
        @*/
        /*@
        produce_lemma_function_pointer_chunk(dummy_lemma) : inv_has_term_perm(termScope, mutex_inv(mutex, termScope, fs, s, scope, level, inv, blockeesId))() {
            call();
            open mutex_inv(mutex, termScope, fs, s, scope, level, inv, blockeesId)();
        };
        @*/
        //@ leak is_inv_has_term_perm(_, _, _);
        //@ produce_lemma_function_pointer_chunk(sep) : semaphore_sep(olevel + 1, mutex_inv(mutex, termScope, fs, s, scope, level, inv, blockeesId), s, sep, unsep)() { call(); };
        //@ produce_lemma_function_pointer_chunk(unsep) : semaphore_unsep(olevel + 1, mutex_inv(mutex, termScope, fs, s, scope, level, inv, blockeesId), s, sep, unsep)() { call(); };
        //@ produce_lemma_function_pointer_chunk(block) : semaphore_acquire_block(termScope, olevel + 1, unsep, P, blocked)() { call(); };
        //@ produce_lemma_function_pointer_chunk(unblock) : semaphore_acquire_unblock(termScope, olevel + 1, unsep, blocked, P)() { call(); };
        //@ produce_lemma_function_pointer_chunk(success) : semaphore_acquire_success(olevel + 1, unsep, P, Q)() { call(); };
        //@ close sep();
        //@ close P();
        //@ close exists(olevel + 1);
        semaphore_acquire(mutex->semaphore);
        //@ open Q();
        //@ open sep();
    }
    //@ close [f*fs]obligation_space0(space, termScope);
    //@ close mutex_held(mutex, space, termScope, fs, level, inv, f);
    //@ close obspace_obligation_set(space, cons(level, obs));
}
Example #11
static PyObject*
ConditionObject_wait(ConditionObject *self, PyObject *args, PyObject *kwargs)
{
    PyObject *timeout = NULL;
    PyObject *res, *result, *saved_state = NULL;
    SemaphoreObject *waiter;
    long seconds = 0;
    
    static char *keywords[] = {"timeout", NULL};

    DEBUG("self:%p", self);

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:wait", keywords, &timeout)) {
        return NULL;
    }

    if (timeout == NULL) {
        seconds = 0;
    } else if (timeout == Py_None) {
        seconds = 0;
    } else if (PyLong_Check(timeout)) {
        seconds = PyLong_AsLong(timeout);
        if (seconds < 0) {
            PyErr_SetString(PyExc_ValueError, "timeout value out of range");
            return NULL;
        }
    } else {
        PyErr_SetString(PyExc_TypeError, "an integer is required");
        return NULL;
    }


    res = call_method((PyObject*)self, "_is_owned");
    if (res == NULL) {
        return NULL;
    }
    
    if (PyObject_Not(res)) {
        Py_DECREF(res);
        PyErr_SetString(PyExc_RuntimeError, "cannot release un-acquired lock");
        return NULL;
    }
    Py_DECREF(res);
    
    waiter = (SemaphoreObject*)PyObject_CallFunctionObjArgs((PyObject*)&SemaphoreObjectType, NULL);
    if (waiter == NULL) {
        return NULL;
    }

    res = semaphore_acquire(waiter, Py_True, 0);
    Py_XDECREF(res);
    if (res == NULL) {
        Py_DECREF(waiter);
        return NULL;
    }

    if (PyList_Append(self->waiters, (PyObject*)waiter) == -1) {
        Py_DECREF(waiter);
        return NULL;
    }

    saved_state = call_method((PyObject*)self, "_release_save");
    if (saved_state == NULL) {
        Py_DECREF(waiter);
        return NULL;
    }

    res = semaphore_acquire(waiter, Py_True, seconds);
    
    result = call_method_args1((PyObject*)self, "_acquire_restore", saved_state);

    Py_DECREF(saved_state);
    if (result == NULL) {
        return NULL;
    }

    Py_DECREF(waiter);
    return res;
}
Example #12
void sema_acquire(sema sema)
    /*@
    requires
        sema_handle(sema, ?f, ?space, ?termScope, ?fs, ?level, ?creditObject, ?inv, ?releaseTokens) &*&
        obspace_obligation_set(space, ?obs) &*& level_all_above(obs, level) == true &*&
        credit(creditObject);
    @*/
    /*@
    ensures
        sema_handle(sema, f, space, termScope, fs, level, creditObject, inv, releaseTokens - 1) &*&
        obspace_obligation_set(space, obs) &*&
        inv();
    @*/
    //@ terminates;
{
    //@ open sema_handle(sema, f, space, termScope, fs, level, creditObject, inv, releaseTokens);
    //@ open [f/2]sema(sema, space, termScope, fs, level, creditObject, inv, ?countingId);
    //@ open obspace(fs, space, termScope);
    //@ open [?fs_]obligation_space0(space, termScope);
    //@ open [_]obspace_credit_object_info(creditObject, space, level);
    //@ open obspace_obligation_set(space, obs);
    //@ semaphore s = sema->semaphore;
    //@ assert [_]ghost_cell<pair<int, real> >(space, pair(?scope, ?olevel));
    {
        /*@
        predicate sep() =
            [fs_]atomic_space(olevel, obligation_space_inv(scope, termScope)) &*&
            [_]atomic_space(olevel + 1, credit_object_(creditObject, scope, level)) &*&
            true;
        predicate unsep(int items, int blockees) =
            [fs_]atomic_space(olevel, obligation_space_inv(scope, termScope)) &*&
            [_]atomic_space(olevel + 1, credit_object_(creditObject, scope, level)) &*&
            [fs]term_perm(termScope, false) &*&
            credit_object_handle(creditObject, items, blockees) &*&
            n_times(items, inv) &*&
            n_times(items, sema_release_token_(sema, space, termScope, level, creditObject, inv));
        predicate P() = obligation_set(scope, obs) &*& credit(creditObject);
        predicate blocked() = obligation_set_calling(scope, obs, 1, level);
        predicate Q() = obligation_set(scope, obs) &*& inv() &*& sema_release_token(sema, space, termScope, level, creditObject, inv);
        lemma void sep()
            requires sema_inv(sema, s, space, termScope, fs, level, creditObject, inv)() &*& sep();
            ensures semaphore(s, ?items, ?blockees) &*& unsep(items, blockees);
        {
            open sema_inv(sema, s, space, termScope, fs, level, creditObject, inv)();
            open sep();

            assert semaphore(s, ?items, ?blockees);
            
            close unsep(items, blockees);
        }
        lemma void unsep()
            requires semaphore(s, ?items, ?blockees) &*& unsep(items, blockees);
            ensures sema_inv(sema, s, space, termScope, fs, level, creditObject, inv)() &*& sep();
        {
            open unsep(_, _);
            
            close sep();
            close sema_inv(sema, s, space, termScope, fs, level, creditObject, inv)();
        }
        lemma void block()
            requires atomic_space_level(olevel + 2) &*& unsep(0, ?blockees) &*& P();
            ensures atomic_space_level(olevel + 2) &*& unsep(0, blockees + 1) &*& blocked() &*& stop_perm(termScope);
        {
            open unsep(_, _);
            open P();

            {
                predicate P1() =
                    [fs_]atomic_space(olevel, obligation_space_inv(scope, termScope)) &*&
                    obligation_set(scope, obs) &*&
                    credit_object_handle(creditObject, 0, blockees) &*&
                    credit(creditObject);
                predicate Q1() =
                    [fs_]atomic_space(olevel, obligation_space_inv(scope, termScope)) &*&
                    credit_object_handle(creditObject, 0, blockees + 1) &*&
                    obligation_set_calling(scope, obs, 1, level) &*&
                    stop_perm(termScope);
                lemma void body()
                    requires atomic_space_level(olevel + 1) &*& credit_object_(creditObject, scope, level)() &*& P1();
                    ensures atomic_space_level(olevel + 1) &*& credit_object_(creditObject, scope, level)() &*& Q1();
                {
                    open credit_object_(creditObject, scope, level)();
                    open P1();

                    credit_object_block();
                    {
                        predicate P2() =
                            obligation_set(scope, obs) &*& obligation(scope, level);
                        predicate Q2() =
                            obligation_set_calling(scope, obs, 1, level) &*& stop_perm(termScope);
                        lemma void body2()
                            requires atomic_space_level(olevel) &*& obligation_space_inv(scope, termScope)() &*& P2();
                            ensures atomic_space_level(olevel) &*& obligation_space_inv(scope, termScope)() &*& Q2();
                        {
                            open obligation_space_inv(scope, termScope)();
                            open P2();

                            call_obligation();

                            close Q2();
                            close obligation_space_inv(scope, termScope)();
                        }
                        produce_lemma_function_pointer_chunk(body2) : atomic_noop_body(olevel, obligation_space_inv(scope, termScope), P2, Q2)() { call(); } {
                            close P2();
                            atomic_noop_nested();
                            open Q2();
                        }
                    }

                    close Q1();
                    close credit_object_(creditObject, scope, level)();
                }
                produce_lemma_function_pointer_chunk(body) : atomic_noop_body(olevel + 1, credit_object_(creditObject, scope, level), P1, Q1)() { call(); } {
                    close P1();
                    atomic_noop_nested();
                    open Q1();
                }
            }

            close unsep(0, blockees + 1);
            close blocked();
        }
        lemma void unblock()
            requires atomic_space_level(olevel + 2) &*& unsep(0, ?blockees) &*& 0 < blockees &*& blocked() &*& stop_perm(termScope);
            ensures atomic_space_level(olevel + 2) &*& unsep(0, blockees - 1) &*& P();
        {
            open unsep(_, _);
            open blocked();

            {
                predicate P1() =
                    [fs_]atomic_space(olevel, obligation_space_inv(scope, termScope)) &*&
                    obligation_set_calling(scope, obs, 1, level) &*&
                    credit_object_handle(creditObject, 0, blockees) &*&
                    stop_perm(termScope);
                predicate Q1() =
                    [fs_]atomic_space(olevel, obligation_space_inv(scope, termScope)) &*&
                    credit_object_handle(creditObject, 0, blockees - 1) &*&
                    obligation_set(scope, obs) &*&
                    credit(creditObject);
                lemma void body()
                    requires atomic_space_level(olevel + 1) &*& credit_object_(creditObject, scope, level)() &*& P1();
                    ensures atomic_space_level(olevel + 1) &*& credit_object_(creditObject, scope, level)() &*& Q1();
                {
                    open credit_object_(creditObject, scope, level)();
                    open P1();

                    {
                        predicate P2() =
                            obligation_set_calling(scope, obs, 1, level) &*& stop_perm(termScope);
                        predicate Q2() =
                            obligation_set(scope, obs) &*& obligation(scope, level);
                        lemma void body2()
                            requires atomic_space_level(olevel) &*& obligation_space_inv(scope, termScope)() &*& P2();
                            ensures atomic_space_level(olevel) &*& obligation_space_inv(scope, termScope)() &*& Q2();
                        {
                            open obligation_space_inv(scope, termScope)();
                            open P2();

                            return_obligation();

                            close Q2();
                            close obligation_space_inv(scope, termScope)();
                        }
                        produce_lemma_function_pointer_chunk(body2) : atomic_noop_body(olevel, obligation_space_inv(scope, termScope), P2, Q2)() { call(); } {
                            close P2();
                            atomic_noop_nested();
                            open Q2();
                        }
                    }
                    credit_object_unblock();

                    close Q1();
                    close credit_object_(creditObject, scope, level)();
                }
                produce_lemma_function_pointer_chunk(body) : atomic_noop_body(olevel + 1, credit_object_(creditObject, scope, level), P1, Q1)() { call(); } {
                    close P1();
                    atomic_noop_nested();
                    open Q1();
                }
            }

            close unsep(0, blockees - 1);
            close P();
        }
        lemma void acquire()
            requires atomic_space_level(olevel + 2) &*& unsep(?items, 0) &*& 0 < items &*& P();
            ensures atomic_space_level(olevel + 2) &*& unsep(items - 1, 0) &*& Q();
        {
            open unsep(_, _);
            open P();

            {
                predicate P1() =
                    credit_object_handle(creditObject, items, 0) &*&
                    credit(creditObject);
                predicate Q1() =
                    credit_object_handle(creditObject, items - 1, 0);
                lemma void body()
                    requires atomic_space_level(olevel + 1) &*& credit_object_(creditObject, scope, level)() &*& P1();
                    ensures atomic_space_level(olevel + 1) &*& credit_object_(creditObject, scope, level)() &*& Q1();
                {
                    open credit_object_(creditObject, scope, level)();
                    open P1();

                    credit_object_acquire();

                    close Q1();
                    close credit_object_(creditObject, scope, level)();
                }
                produce_lemma_function_pointer_chunk(body) : atomic_noop_body(olevel + 1, credit_object_(creditObject, scope, level), P1, Q1)() { call(); } {
                    close P1();
                    atomic_noop_nested();
                    open Q1();
                }
            }

            open n_times(items, inv);
            open n_times(items, sema_release_token_(sema, space, termScope, level, creditObject, inv));
            open sema_release_token_(sema, space, termScope, level, creditObject, inv)();
            close Q();
            close unsep(items - 1, 0);
        }
        lemma void dummy_lemma()
            requires true;
            ensures true;
        {}
        @*/
        /*@
        produce_lemma_function_pointer_chunk(dummy_lemma) : inv_has_term_perm(termScope, sema_inv(sema, s, space, termScope, fs, level, creditObject, inv))() {
            call();
            open sema_inv(sema, s, space, termScope, fs, level, creditObject, inv)();
        };
        @*/
        //@ leak is_inv_has_term_perm(_, _, _);
        //@ produce_lemma_function_pointer_chunk(sep) : semaphore_sep(olevel + 2, sema_inv(sema, s, space, termScope, fs, level, creditObject, inv), s, sep, unsep)() { call(); };
        //@ produce_lemma_function_pointer_chunk(unsep) : semaphore_unsep(olevel + 2, sema_inv(sema, s, space, termScope, fs, level, creditObject, inv), s, sep, unsep)() { call(); };
        //@ produce_lemma_function_pointer_chunk(block) : semaphore_acquire_block(termScope, olevel + 2, unsep, P, blocked)() { call(); };
        //@ produce_lemma_function_pointer_chunk(unblock) : semaphore_acquire_unblock(termScope, olevel + 2, unsep, blocked, P)() { call(); };
        //@ produce_lemma_function_pointer_chunk(acquire) : semaphore_acquire_success(olevel + 2, unsep, P, Q)() { call(); };
        //@ close sep();
        //@ close P();
        //@ close exists(olevel + 2);
        semaphore_acquire(sema->semaphore);
        //@ open sep();
        //@ open Q();
    }
    //@ close [fs_]obligation_space0(space, termScope);
    //@ close [f/2]obspace(fs, space, termScope);
    //@ close [f/2]sema(sema, space, termScope, fs, level, creditObject, inv, countingId);
    //@ open sema_release_token(sema, space, termScope, level, creditObject, inv);
    //@ assert counting_ticket(countingId, ?frac);
    //@ close [frac]sema_(sema)();
    //@ destroy_counting_ticket(countingId);
    //@ close sema_handle(sema, f, space, termScope, fs, level, creditObject, inv, releaseTokens - 1);
    //@ close obspace_obligation_set(space, obs);
}
Example #13
static PyObject*
SemaphoreObject_enter(SemaphoreObject *self, PyObject *args)
{
    return semaphore_acquire(self, Py_True, 0);
}
Example #14
status_t d940_ethernet_write (void * handler, void * source, int64_t offset,
                                  int32_t * p_count) {
  d940_eth_data_t *   pdata = (d940_eth_data_t *) handler;
  d940_eth_t          d940_ethernet_device = pdata->dev;
  d940_eth_ncr_t      ncr;
  d940_eth_tsr_t      tsr;
  int32_t             buffer_index;
  void *              data = source;
  int32_t             buffer_size;
  int32_t             i;
  int32_t             frame_size = *p_count;
  interrupt_status_t  it_status;

  if(frame_size < 0)
  {
    return DNA_BAD_ARGUMENT;
  }

  if(pdata->tx_write == 0)
  {
    pdata->tx_write = *((int32_t *) data);
    return DNA_OK;
  }

  it_status = cpu_trap_mask_and_backup();
  lock_acquire(&pdata->lock);
   
  /* Check/Clear the status of the transmit */
  cpu_read(UINT32, &(d940_ethernet_device->tsr.raw), tsr.raw);
  cpu_write(UINT32, &(d940_ethernet_device->tsr.raw), tsr.raw);
  
  lock_release(&pdata->lock);
  cpu_trap_restore(it_status);
 
  if(tsr.bits.und)
  {
    log(INFO_LEVEL, "Underrun: Clear the transmit buffer list");
    
    it_status = cpu_trap_mask_and_backup();
    lock_acquire(&pdata->lock);
   
    /* Stop the transmit in case of underrun */
    cpu_read(UINT32, &(d940_ethernet_device->ncr.raw), ncr.raw);
    ncr.bits.te = 0;
    cpu_write(UINT32, &(d940_ethernet_device->ncr.raw), ncr.raw);
    ncr.bits.te = 1;
    cpu_write(UINT32, &(d940_ethernet_device->ncr.raw), ncr.raw);
    
    lock_release(&pdata->lock);
    cpu_trap_restore(it_status);
    
    /* ReInit the transmit */
    pdata->tx_tail = 0;
  
    /* ReInit the semaphore */
    while(semaphore_acquire(pdata->tx_sem, 1, DNA_RELATIVE_TIMEOUT, 0) 
        == DNA_OK);
    semaphore_release(pdata->tx_sem, TX_PACKET_LIMIT, 0);
  
    /* Clear the buffers */
    for(i = 0; i < D940_ETH_TX_BUFFER_COUNT; i++)
    {
      pdata->transmit_descs[i].used = 1;
    }
  }

  buffer_index = pdata->tx_tail;

  while(frame_size > 0)
  {
    cpu_cache_invalidate(CPU_CACHE_DATA, &pdata->transmit_descs[buffer_index],
      sizeof(struct tbde));
    
    buffer_size = D940_ETH_TX_BUFFER_SIZE;
    if(buffer_size > frame_size)
    {
      buffer_size = frame_size;
    }

    /* Copy in the transmit buffer */
    dna_memcpy(&pdata->transmit_buffers[buffer_index * D940_ETH_TX_BUFFER_SIZE],
      data, buffer_size);

    data += buffer_size;
    frame_size -= buffer_size;
    pdata->tx_write -= buffer_size;

    /* Set the transmit buffer as ready to send */
    pdata->transmit_descs[buffer_index].len = buffer_size;
    pdata->transmit_descs[buffer_index].used = 0;

    /* Last chunk ? */
    if(pdata->tx_write != 0)
    {
      pdata->transmit_descs[buffer_index].last = 0;
    }
    else
    {
      pdata->transmit_descs[buffer_index].last = 1;
    }

    buffer_index = NEXT_TX_BUFFER(buffer_index);
  }

  /* Next Packet invalid */
  pdata->transmit_descs[buffer_index].used = 1;
  pdata->tx_tail = buffer_index;

  if(pdata->tx_write == 0)
  {
    /* Force to flush the cache */
    cpu_cache_sync();
  
    it_status = cpu_trap_mask_and_backup();
    lock_acquire(&pdata->lock);
  
    /* Restart the transmission */
    cpu_read(UINT32, &(d940_ethernet_device->ncr.raw), ncr.raw);
    ncr.bits.tstart = 1;
    cpu_write(UINT32, &(d940_ethernet_device->ncr.raw), ncr.raw);
  
    lock_release(&pdata->lock);
    cpu_trap_restore(it_status);

    semaphore_acquire(pdata->tx_sem, 1, 0, 0);
  }
  return DNA_OK;
}
Example #15
int event_run_platform(Array *event_sources, int *running) {
	int result = -1;
	int i;
	EventSource *event_source;
	fd_set *fd_read_set;
	fd_set *fd_write_set;
	int ready;
	int handled;
	uint8_t byte = 1;
	int rc;
	int event_source_count;
	int received_events;

	if (event_add_source(_usb_poller.ready_pipe[0], EVENT_SOURCE_TYPE_GENERIC,
	                     EVENT_READ, event_forward_usb_events,
	                     event_sources) < 0) {
		return -1;
	}

	*running = 1;

	_usb_poller.running = 1;

	thread_create(&_usb_poller.thread, event_poll_usb_events, event_sources);

	event_cleanup_sources();

	while (*running) {
		// update SocketSet arrays
		if (event_reserve_socket_set(&_socket_read_set, // FIXME: this over-allocates
		                             event_sources->count) < 0) {
			log_error("Could not resize socket read set: %s (%d)",
			          get_errno_name(errno), errno);

			goto cleanup;
		}

		if (event_reserve_socket_set(&_socket_write_set, // FIXME: this over-allocates
		                             event_sources->count) < 0) {
			log_error("Could not resize socket write set: %s (%d)",
			          get_errno_name(errno), errno);

			goto cleanup;
		}

		_socket_read_set->count = 0;
		_socket_write_set->count = 0;

		for (i = 0; i < event_sources->count; i++) {
			event_source = array_get(event_sources, i);

			if (event_source->type != EVENT_SOURCE_TYPE_GENERIC) {
				continue;
			}

			if (event_source->events & EVENT_READ) {
				_socket_read_set->sockets[_socket_read_set->count++] = event_source->handle;
			}

			if (event_source->events & EVENT_WRITE) {
				_socket_write_set->sockets[_socket_write_set->count++] = event_source->handle;
			}
		}

		// start to select
		log_debug("Starting to select on %d + %d %s event source(s)",
		          _socket_read_set->count, _socket_write_set->count,
		          event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, 0));

		semaphore_release(&_usb_poller.resume);

		fd_read_set = event_get_socket_set_as_fd_set(_socket_read_set);
		fd_write_set = event_get_socket_set_as_fd_set(_socket_write_set);

		ready = select(0, fd_read_set, fd_write_set, NULL, NULL);

		if (_usb_poller.running) {
			log_debug("Sending suspend signal to USB poll thread");

			if (usbi_write(_usb_poller.suspend_pipe[1], &byte, 1) < 0) {
				log_error("Could not write to USB suspend pipe");

				_usb_poller.stuck = 1;
				*running = 0;

				goto cleanup;
			}

			semaphore_acquire(&_usb_poller.suspend);

			if (usbi_read(_usb_poller.suspend_pipe[0], &byte, 1) < 0) {
				log_error("Could not read from USB suspend pipe");

				_usb_poller.stuck = 1;
				*running = 0;

				goto cleanup;
			}
		}

		if (ready < 0) {
			rc = ERRNO_WINAPI_OFFSET + WSAGetLastError();

			if (rc == ERRNO_WINAPI_OFFSET + WSAEINTR) {
				continue;
			}

			log_error("Could not select on %s event sources: %s (%d)",
			          event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, 0),
			          get_errno_name(rc), rc);

			*running = 0;

			goto cleanup;
		}

		// handle select result
		log_debug("Select returned %d %s event source(s) as ready",
		          ready,
		          event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, 0));

		handled = 0;

		// cache event source count here to avoid looking at new event
		// sources that got added during the event handling
		event_source_count = event_sources->count;

		for (i = 0; i < event_source_count && ready > handled; ++i) {
			event_source = array_get(event_sources, i);
			received_events = 0;

			if (event_source->type != EVENT_SOURCE_TYPE_GENERIC) {
				continue;
			}

			if (FD_ISSET(event_source->handle, fd_read_set)) {
				received_events |= EVENT_READ;
			}

			if (FD_ISSET(event_source->handle, fd_write_set)) {
				received_events |= EVENT_WRITE;
			}

			if (received_events == 0) {
				continue;
			}

			if (event_source->state != EVENT_SOURCE_STATE_NORMAL) {
				log_debug("Ignoring %s event source (handle: %d, received events: %d) marked as removed at index %d",
				          event_get_source_type_name(event_source->type, 0),
				          event_source->handle, received_events, i);
			} else {
				log_debug("Handling %s event source (handle: %d, received events: %d) at index %d",
				          event_get_source_type_name(event_source->type, 0),
				          event_source->handle, received_events, i);

				if (event_source->function != NULL) {
					event_source->function(event_source->opaque);
				}
			}

			++handled;

			if (!*running) {
				break;
			}
		}

		if (ready == handled) {
			log_debug("Handled all ready %s event sources",
			          event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, 0));
		} else {
			log_warn("Handled only %d of %d ready %s event source(s)",
			         handled, ready,
			         event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, 0));
		}

		// now remove event sources that got marked as removed during the
		// event handling
		event_cleanup_sources();
	}

	result = 0;

cleanup:
	if (_usb_poller.running && !_usb_poller.stuck) {
		_usb_poller.running = 0;

		log_debug("Stopping USB poll thread");

		if (usbi_write(_usb_poller.suspend_pipe[1], &byte, 1) < 0) {
			log_error("Could not write to USB suspend pipe");
		} else {
			semaphore_release(&_usb_poller.resume);
			thread_join(&_usb_poller.thread);
		}
	}

	event_remove_source(_usb_poller.ready_pipe[0], EVENT_SOURCE_TYPE_GENERIC);

	return result;
}
Example #16
static void event_poll_usb_events(void *opaque) {
	Array *event_sources = opaque;
	int count;
	struct usbi_pollfd *pollfd;
	EventSource *event_source;
	int i;
	int k;
	int ready;
	uint8_t byte = 0;

	log_debug("Started USB poll thread");

	for (;;) {
		semaphore_acquire(&_usb_poll_resume);

		log_event_debug("Resumed USB poll thread");

		if (!_usb_poll_running) {
			goto cleanup;
		}

		_usb_poll_pollfds_ready = 0;

		// update pollfd array
		count = 0;

		for (i = 0; i < event_sources->count; ++i) {
			event_source = array_get(event_sources, i);

			if (event_source->type == EVENT_SOURCE_TYPE_USB) {
				++count;
			}
		}

		if (count == 0) {
			goto suspend;
		}

		++count; // add the suspend pipe

		if (array_resize(&_usb_poll_pollfds, count, NULL) < 0) {
			log_error("Could not resize USB pollfd array: %s (%d)",
			          get_errno_name(errno), errno);

			goto cleanup;
		}

		pollfd = array_get(&_usb_poll_pollfds, 0);

		pollfd->fd = _usb_poll_suspend_pipe[0];
		pollfd->events = USBI_POLLIN;
		pollfd->revents = 0;

		for (i = 0, k = 1; i < event_sources->count; ++i) {
			event_source = array_get(event_sources, i);

			if (event_source->type != EVENT_SOURCE_TYPE_USB) {
				continue;
			}

			pollfd = array_get(&_usb_poll_pollfds, k);

			pollfd->fd = event_source->handle;
			pollfd->events = (short)event_source->events;
			pollfd->revents = 0;

			++k;
		}

		// start to poll
		log_event_debug("Starting to poll on %d %s event source(s)",
		                _usb_poll_pollfds.count - 1,
		                event_get_source_type_name(EVENT_SOURCE_TYPE_USB, false));

	retry:
		ready = usbi_poll((struct usbi_pollfd *)_usb_poll_pollfds.bytes,
		                  _usb_poll_pollfds.count, -1);

		if (ready < 0) {
			if (errno_interrupted()) {
				log_debug("Poll got interrupted, retrying");

				goto retry;
			}

			log_error("Could not poll on %s event source(s): %s (%d)",
			          event_get_source_type_name(EVENT_SOURCE_TYPE_USB, false),
			          get_errno_name(errno), errno);

			goto suspend;
		}

		if (ready == 0) {
			goto suspend;
		}

		// handle poll result
		pollfd = array_get(&_usb_poll_pollfds, 0);

		if (pollfd->revents != 0) {
			log_event_debug("Received suspend signal");

			--ready; // remove the suspend pipe
		}

		if (ready == 0) {
			goto suspend;
		}

		log_event_debug("Poll returned %d %s event source(s) as ready", ready,
		                event_get_source_type_name(EVENT_SOURCE_TYPE_USB, false));

		_usb_poll_pollfds_ready = ready;

		if (pipe_write(&_usb_poll_ready_pipe, &byte, sizeof(byte)) < 0) {
			log_error("Could not write to USB ready pipe: %s (%d)",
			          get_errno_name(errno), errno);

			goto cleanup;
		}

	suspend:
		log_event_debug("Suspending USB poll thread");

		semaphore_release(&_usb_poll_suspend);
	}

cleanup:
	log_debug("Stopped USB poll thread");

	semaphore_release(&_usb_poll_suspend);

	_usb_poll_running = false;
}
Example #17
// Main SPI loop. This runs independently from the brickd event thread.
// Data between RED Brick and SPI slave is exchanged every 500us.
// If there is no data to be sent, we cycle through the slaves and request
// data. If there is data to be sent, the slave that ought to receive
// the data gets priority. This can greatly reduce latency in a big stack.
static void red_stack_spi_thread(void *opaque) {
	REDStackPacket *packet_to_spi = NULL;
	uint8_t stack_address_cycle;
	int ret;

	(void)opaque;

	do {
		stack_address_cycle = 0;
		_red_stack_reset_detected = 0;
		_red_stack.slave_num = 0;
		red_stack_spi_create_routing_table();

		_red_stack_spi_thread_running = false;

		if (_red_stack.slave_num > 0) {
			_red_stack_spi_thread_running = true;
		}

		// Ignore resets that we received in the meantime to prevent race conditions.
		_red_stack_reset_detected = 0;

		while (_red_stack_spi_thread_running) {
			REDStackSlave *slave = &_red_stack.slaves[stack_address_cycle];
			REDStackPacket *request = NULL;
			memset(&_red_stack.packet_from_spi, 0, sizeof(Packet));

			// Get packet from queue. The queue contains packets that are to be
			// sent over SPI. It is filled from the main brickd event thread, so
			// we have to make sure that there is no race condition.
			if(slave->next_packet_empty) {
				slave->next_packet_empty = false;
				packet_to_spi = NULL;
			} else {
				mutex_lock(&(slave->packet_queue_mutex));
				packet_to_spi = queue_peek(&slave->packet_to_spi_queue);
				mutex_unlock(&(slave->packet_queue_mutex));
			}

			stack_address_cycle++;

			if (stack_address_cycle >= _red_stack.slave_num) {
				stack_address_cycle = 0;
			}

			// Set request if we have a packet to send
			if (packet_to_spi != NULL) {
				log_packet_debug("Packet will now be send over SPI (%s)",
				                 packet_get_request_signature(packet_signature, &packet_to_spi->packet));

				request = packet_to_spi;
			}

			ret = red_stack_spi_transceive_message(request, &_red_stack.packet_from_spi, slave);

			if ((ret & RED_STACK_TRANSCEIVE_RESULT_MASK_SEND) == RED_STACK_TRANSCEIVE_RESULT_SEND_OK) {
				if ((!((ret & RED_STACK_TRANSCEIVE_RESULT_MASK_READ) == RED_STACK_TRANSCEIVE_RESULT_READ_ERROR))) {
					// If we sent a packet, it must have come from the queue, so we can
					// pop it from the queue now.
					// If the sending didn't work (for whatever reason), we don't pop it
					// and therefore we will automatically try to send it again in the next cycle.
					mutex_lock(&(slave->packet_queue_mutex));
					queue_pop(&slave->packet_to_spi_queue, NULL);
					mutex_unlock(&(slave->packet_queue_mutex));
				}
			}

			// If we received a packet, we will dispatch it immediately.
			// We have some time until we try the next SPI communication anyway.
			if ((ret & RED_STACK_TRANSCEIVE_RESULT_MASK_READ) == RED_STACK_TRANSCEIVE_RESULT_READ_OK) {
				// TODO: Check again if packet is valid?
				// We did already check the hash.

				// Before the dispatching we insert the stack position into an enumerate message
				red_stack_spi_insert_position(slave);

				red_stack_spi_request_dispatch_response_event();
				// Wait until message is dispatched, so we don't overwrite it
				// accidentally.
				semaphore_acquire(&_red_stack_dispatch_packet_from_spi_semaphore);
			}

			SLEEP_NS(0, 1000*_red_stack_spi_poll_delay);
		}

		if (_red_stack.slave_num == 0) {
			pthread_mutex_lock(&_red_stack_wait_for_reset_mutex);
			// Use helper to be safe against spurious wakeups
			_red_stack_wait_for_reset_helper = 0;

			while (_red_stack_wait_for_reset_helper == 0) {
				pthread_cond_wait(&_red_stack_wait_for_reset_cond, &_red_stack_wait_for_reset_mutex);
			}

			pthread_mutex_unlock(&_red_stack_wait_for_reset_mutex);
		}

		if (_red_stack_reset_detected > 0) {
			red_stack_spi_handle_reset();
		}
	} while (_red_stack_reset_detected > 0);
}
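The semaphore_acquire(&_red_stack_dispatch_packet_from_spi_semaphore) call after a successful read keeps the SPI thread from overwriting _red_stack.packet_from_spi until the brickd event thread has dispatched it. A hedged sketch of the dispatching side that releases the semaphore again (the function name and the dispatch step are assumptions):

/* Hedged sketch (hypothetical): the event-thread side of the hand-off in
 * Example #17. Once the packet has been passed on, it releases the
 * semaphore so the SPI thread may reuse _red_stack.packet_from_spi. */
static void red_stack_dispatch_from_spi(void *opaque) {
	(void)opaque;

	/* ... hand _red_stack.packet_from_spi to the network dispatcher ... */

	semaphore_release(&_red_stack_dispatch_packet_from_spi_semaphore);
}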
Example #18
int event_run_platform(Array *event_sources, bool *running, EventCleanupFunction cleanup) {
	int result = -1;
	int i;
	EventSource *event_source;
	fd_set *fd_read_set;
	fd_set *fd_write_set;
	fd_set *fd_error_set;
	int ready;
	int handled;
	uint8_t byte = 1;
	int rc;
	int event_source_count;
	uint32_t received_events;

	if (event_add_source(_usb_poll_ready_pipe.read_end,
	                     EVENT_SOURCE_TYPE_GENERIC, EVENT_READ,
	                     event_forward_usb_events, event_sources) < 0) {
		return -1;
	}

	*running = true;
	_usb_poll_running = true;

	thread_create(&_usb_poll_thread, event_poll_usb_events, event_sources);

	cleanup();
	event_cleanup_sources();

	while (*running) {
		// update SocketSet arrays
		if (event_reserve_socket_set(&_socket_read_set, // FIXME: this over-allocates
		                             event_sources->count) < 0) {
			log_error("Could not resize socket read set: %s (%d)",
			          get_errno_name(errno), errno);

			goto cleanup;
		}

		if (event_reserve_socket_set(&_socket_write_set, // FIXME: this over-allocates
		                             event_sources->count) < 0) {
			log_error("Could not resize socket write set: %s (%d)",
			          get_errno_name(errno), errno);

			goto cleanup;
		}

		if (event_reserve_socket_set(&_socket_error_set, // FIXME: this over-allocates
		                             event_sources->count) < 0) {
			log_error("Could not resize socket error set: %s (%d)",
			          get_errno_name(errno), errno);

			goto cleanup;
		}

		_socket_read_set->count = 0;
		_socket_write_set->count = 0;
		_socket_error_set->count = 0;

		for (i = 0; i < event_sources->count; ++i) {
			event_source = array_get(event_sources, i);

			if (event_source->type != EVENT_SOURCE_TYPE_GENERIC) {
				continue;
			}

			if ((event_source->events & EVENT_READ) != 0) {
				_socket_read_set->sockets[_socket_read_set->count++] = event_source->handle;
			}

			if ((event_source->events & EVENT_WRITE) != 0) {
				_socket_write_set->sockets[_socket_write_set->count++] = event_source->handle;
			}

			if ((event_source->events & EVENT_PRIO) != 0) {
				log_error("Event prio is not supported");
			}

			if ((event_source->events & EVENT_ERROR) != 0) {
				_socket_error_set->sockets[_socket_error_set->count++] = event_source->handle;
			}
		}

		// start to select
		log_event_debug("Starting to select on %d + %d + %d %s event source(s)",
		                _socket_read_set->count, _socket_write_set->count, _socket_error_set->count,
		                event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, false));

		semaphore_release(&_usb_poll_resume);

		fd_read_set = event_get_socket_set_as_fd_set(_socket_read_set);
		fd_write_set = event_get_socket_set_as_fd_set(_socket_write_set);
		fd_error_set = event_get_socket_set_as_fd_set(_socket_error_set);

		ready = select(0, fd_read_set, fd_write_set, fd_error_set, NULL);

		if (_usb_poll_running) {
			log_event_debug("Sending suspend signal to USB poll thread");

			if (usbi_write(_usb_poll_suspend_pipe[1], &byte, 1) < 0) {
				log_error("Could not write to USB suspend pipe");

				_usb_poll_stuck = true;

				goto cleanup;
			}

			semaphore_acquire(&_usb_poll_suspend);

			if (usbi_read(_usb_poll_suspend_pipe[0], &byte, 1) < 0) {
				log_error("Could not read from USB suspend pipe");

				_usb_poll_stuck = true;

				goto cleanup;
			}
		}

		if (ready == SOCKET_ERROR) {
			rc = ERRNO_WINAPI_OFFSET + WSAGetLastError();

			if (rc == ERRNO_WINAPI_OFFSET + WSAEINTR) {
				continue;
			}

			log_error("Could not select on %s event sources: %s (%d)",
			          event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, false),
			          get_errno_name(rc), rc);

			goto cleanup;
		}

		// handle select result
		log_event_debug("Select returned %d %s event source(s) as ready",
		                ready, event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, false));

		handled = 0;

		// cache event source count here to avoid looking at new event
		// sources that got added during the event handling
		event_source_count = event_sources->count;

		for (i = 0; *running && i < event_source_count && ready > handled; ++i) {
			event_source = array_get(event_sources, i);
			received_events = 0;

			if (event_source->type != EVENT_SOURCE_TYPE_GENERIC) {
				continue;
			}

			if (FD_ISSET(event_source->handle, fd_read_set)) {
				received_events |= EVENT_READ;
			}

			if (FD_ISSET(event_source->handle, fd_write_set)) {
				received_events |= EVENT_WRITE;
			}

			if (FD_ISSET(event_source->handle, fd_error_set)) {
				received_events |= EVENT_ERROR;
			}

			if (received_events == 0) {
				continue;
			}

			event_handle_source(event_source, received_events);

			++handled;
		}

		if (ready == handled) {
			log_event_debug("Handled all ready %s event sources",
			                event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, false));
		} else if (*running) {
			log_warn("Handled only %d of %d ready %s event source(s)",
			         handled, ready,
			         event_get_source_type_name(EVENT_SOURCE_TYPE_GENERIC, false));
		}

		// now cleanup event sources that got marked as disconnected/removed
		// during the event handling
		cleanup();
		event_cleanup_sources();
	}

	result = 0;

cleanup:
	*running = false;

	if (_usb_poll_running && !_usb_poll_stuck) {
		_usb_poll_running = false;

		log_debug("Stopping USB poll thread");

		if (usbi_write(_usb_poll_suspend_pipe[1], &byte, 1) < 0) {
			log_error("Could not write to USB suspend pipe");
		} else {
			semaphore_release(&_usb_poll_resume);
			thread_join(&_usb_poll_thread);
		}
	}

	thread_destroy(&_usb_poll_thread);

	event_remove_source(_usb_poll_ready_pipe.read_end, EVENT_SOURCE_TYPE_GENERIC);

	return result;
}
Example #19
void log_init_platform(IO *output) {
	int rc;
	Semaphore handshake;

	log_set_output_platform(output);

	mutex_create(&_named_pipe_write_event_mutex);

	// open event log
	_event_log = RegisterEventSource(NULL, "Brick Daemon");

	if (_event_log == NULL) {
		rc = ERRNO_WINAPI_OFFSET + GetLastError();

		log_error("Could not open Windows event log: %s (%d)",
		          get_errno_name(rc), rc);
	}

	// create named pipe for log messages
	_named_pipe_write_event = CreateEvent(NULL, TRUE, FALSE, NULL);

	if (_named_pipe_write_event == NULL) {
		rc = ERRNO_WINAPI_OFFSET + GetLastError();

		log_error("Could not create named pipe overlapped write event: %s (%d)",
		          get_errno_name(rc), rc);
	} else {
		_named_pipe = CreateNamedPipe("\\\\.\\pipe\\tinkerforge-brick-daemon-debug-log",
		                              PIPE_ACCESS_DUPLEX |
		                              FILE_FLAG_OVERLAPPED |
		                              FILE_FLAG_FIRST_PIPE_INSTANCE,
		                              PIPE_TYPE_MESSAGE |
		                              PIPE_WAIT,
		                              1,
		                              NAMED_PIPE_BUFFER_LENGTH,
		                              NAMED_PIPE_BUFFER_LENGTH,
		                              0,
		                              NULL);

		if (_named_pipe == INVALID_HANDLE_VALUE) {
			rc = ERRNO_WINAPI_OFFSET + GetLastError();

			// ERROR_PIPE_BUSY/ERROR_ACCESS_DENIED means the pipe already exists
			// because another instance of brickd is already running. There is no
			// point in logging these cases on debug level here, as the log level
			// is still at the default info level, so just ignore these two cases
			if (rc != ERRNO_WINAPI_OFFSET + ERROR_PIPE_BUSY &&
			    rc != ERRNO_WINAPI_OFFSET + ERROR_ACCESS_DENIED) {
				log_error("Could not create named pipe: %s (%d)",
				          get_errno_name(rc), rc);
			}
		} else {
			// create named pipe connect thread
			if (semaphore_create(&handshake) < 0) {
				rc = ERRNO_WINAPI_OFFSET + GetLastError();

				log_error("Could not create handshake semaphore: %s (%d)",
				          get_errno_name(rc), rc);
			} else {
				thread_create(&_named_pipe_thread, log_connect_named_pipe, &handshake);

				semaphore_acquire(&handshake);
				semaphore_destroy(&handshake);
			}
		}
	}
}
Example #20
int iokit_init(void) {
	int phase = 0;
	Semaphore handshake;

	log_debug("Initializing IOKit subsystem");

	// create notification pipe
	if (pipe_create(&_notification_pipe) < 0) {
		log_error("Could not create notification pipe: %s (%d)",
		          get_errno_name(errno), errno);

		goto cleanup;
	}

	phase = 1;

	if (event_add_source(_notification_pipe.read_end, EVENT_SOURCE_TYPE_GENERIC,
	                     EVENT_READ, iokit_forward_notifications, NULL) < 0) {
		goto cleanup;
	}

	phase = 2;

	// create notification poll thread
	if (semaphore_create(&handshake) < 0) {
		log_error("Could not create handshake semaphore: %s (%d)",
		          get_errno_name(errno), errno);

		goto cleanup;
	}

	thread_create(&_poll_thread, iokit_poll_notifications, &handshake);

	semaphore_acquire(&handshake);
	semaphore_destroy(&handshake);

	phase = 3;

	if (!_running) {
		log_error("Could not start notification poll thread");

		goto cleanup;
	}

	phase = 4;

cleanup:
	switch (phase) { // no breaks, all cases fall through intentionally
	case 3:
		thread_destroy(&_poll_thread);

	case 2:
		event_remove_source(_notification_pipe.read_end, EVENT_SOURCE_TYPE_GENERIC);

	case 1:
		pipe_destroy(&_notification_pipe);

	default:
		break;
	}

	return phase == 4 ? 0 : -1;
}
Example #21
status_t d940_ethernet_open (char * name, int32_t mode, void ** data)
{
  d940_eth_t            d940_ethernet_device;
  d940_eth_data_t *     pdata;
  d940_eth_int_t        interrupt;
  d940_eth_ncr_t        ncr;
  d940_eth_ncfgr_t      ncfgr;
  d940_eth_usrio_t      usrio;
  interrupt_status_t    it_status;
  uint32_t              i;
 
  if(data == NULL) return DNA_ERROR;

  /* /!\ Only for one device /!\ */
  if(dna_strcmp (name, d940_ethernet_devices[0]) != 0) return DNA_ERROR;
  pdata = d940_ethernet_handlers[0];

  /* Exclusive access */
  it_status = cpu_trap_mask_and_backup();
  lock_acquire(&pdata->lock);
  if(pdata->ref != 0) 
  {
    lock_release(&pdata->lock);
    cpu_trap_restore(it_status);
    return DNA_ERROR;
  }
  pdata->ref += 1;
  lock_release(&pdata->lock);
  cpu_trap_restore(it_status);

  /* Set device access */
  d940_ethernet_device = pdata->dev;
  *data = pdata;
  
  /* Drain all the semaphores so they start with a count of zero */
  while(semaphore_acquire(pdata->mio_sem, 1, DNA_RELATIVE_TIMEOUT, 0) 
        == DNA_OK);
  while(semaphore_acquire(pdata->mio_comp_sem, 1, DNA_RELATIVE_TIMEOUT, 0) 
        == DNA_OK);
  while(semaphore_acquire(pdata->tx_sem, 1, DNA_RELATIVE_TIMEOUT, 0) 
        == DNA_OK);
  while(semaphore_acquire(pdata->rx_sem, 1, DNA_RELATIVE_TIMEOUT, 0) 
        == DNA_OK);

  /* Set the limit of packet in the transmit buffers */
  semaphore_release(pdata->mio_sem, 1, 0);
  semaphore_release(pdata->tx_sem, TX_PACKET_LIMIT, 0);
  
  /* Initialisation of transmit buffer descriptor */
  for(i = 0; i < D940_ETH_TX_BUFFER_COUNT; i++)
  {
    pdata->transmit_descs[i].addr = 
      ((uint32_t)(& pdata->transmit_buffers[i * D940_ETH_TX_BUFFER_SIZE]));
    
    pdata->transmit_descs[i].used = 1;
    pdata->transmit_descs[i].wrap = 0;
    pdata->transmit_descs[i].no_crc = 0;
  }
  pdata->transmit_descs[i-1].wrap = 1;

  /* Initialisation of receive buffer descriptor */
  for(i = 0; i < D940_ETH_RX_BUFFER_COUNT; i++)
  {
    pdata->receive_descs[i].addr = 
      ((uint32_t)(& pdata->receive_buffers[i * D940_ETH_RX_BUFFER_SIZE])) >> 2;
    
    pdata->receive_descs[i].owner = 0;
    pdata->receive_descs[i].wrap = 0;
  }
  pdata->receive_descs[i-1].wrap = 1;
  
  /* Initialisation of the tails */
  pdata->tx_tail = 0;
  pdata->rx_tail = 0;
  pdata->rx_read = false;
  pdata->tx_write = 0;

  /* Initialisation of the stats */
  pdata->tx_count = 0;
  pdata->rx_count = 0;

  /* Configure the PHY */
  pdata->phy_status = 0;
  d940_ethernet_phy_probe(pdata);
  d940_ethernet_phy_manage(pdata);
 
  it_status = cpu_trap_mask_and_backup();
  lock_acquire(&pdata->lock);
  
  /* Configure the device (depends on the PHY) */
  cpu_read(UINT32, &(d940_ethernet_device->ncfgr.raw), ncfgr.raw);
  ncfgr.bits.rbof = 0;
  ncfgr.bits.drfcs = 1;
  ncfgr.bits.pae = 1;
  cpu_write(UINT32, &(d940_ethernet_device->ncfgr.raw), ncfgr.raw);

  /* Configure MII */
  cpu_read(UINT32, &(d940_ethernet_device->usrio.raw), usrio.raw);
  usrio.bits.clken = 0;
  usrio.bits.rmii = 0;
  cpu_write(UINT32, &(d940_ethernet_device->usrio.raw), usrio.raw);

  /* Enable Interrupt (NO TXUBR & NO MFD) */
  interrupt.raw = 0x00003cf6;
  cpu_write(UINT32, &(d940_ethernet_device->ier.raw), interrupt.raw);

  /* Start Receive/Transmit */
  cpu_read(UINT32, &(d940_ethernet_device->ncr.raw), ncr.raw);
  ncr.bits.re = 1;
  ncr.bits.te = 1;
  ncr.bits.clrstat = 1;
  cpu_write(UINT32, &(d940_ethernet_device->ncr.raw), ncr.raw);
  
  lock_release(&pdata->lock);
  cpu_trap_restore(it_status);
 
  return DNA_OK;  
}