void* alloc_room(struct list* l, size_t size) { void* item = list_get_first_item(l); if (item == NULL) item = (void*) malloc(size); return item; }
/*!	Called by the get_next_sem_info() macro.

	Iterates over the semaphores owned by team \a teamID and fills \a info
	with the next semaphore at or after the position stored in \a _cookie.
	On success, \a _cookie is advanced past the returned entry so repeated
	calls walk the whole list.

	Locking: takes the global sem-list spinlock (interrupts disabled), and
	additionally grabs each candidate semaphore's own lock while inspecting
	and copying it.

	\param teamID	Team whose semaphores are enumerated; must be >= 0 and
		name an existing team.
	\param _cookie	In/out iteration position (index into the team's sem
		list); start with 0.
	\param info		Filled with the found semaphore's info.
	\param size		Must be exactly sizeof(sem_info).
	\return B_OK on success, B_NO_MORE_SEMS if the subsystem is inactive,
		B_BAD_VALUE on bad arguments or when the list is exhausted,
		B_BAD_TEAM_ID for a negative or unknown team.
*/
status_t
_get_next_sem_info(team_id teamID, int32 *_cookie, struct sem_info *info,
	size_t size)
{
	if (!sSemsActive)
		return B_NO_MORE_SEMS;
	if (_cookie == NULL || info == NULL || size != sizeof(sem_info))
		return B_BAD_VALUE;
	if (teamID < 0)
		return B_BAD_TEAM_ID;

	Team* team = Team::Get(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	// Reference with "alreadyHasReference" semantics: released on return.
	BReference<Team> teamReference(team, true);

	InterruptsSpinLocker semListLocker(sSemsSpinlock);

	// TODO: find a way to iterate the list that is more reliable
	// (the cookie is a plain index, so concurrent removals can skip or
	// repeat entries between calls)
	sem_entry* sem = (sem_entry*)list_get_first_item(&team->sem_list);
	int32 newIndex = *_cookie;
	int32 index = 0;
	bool found = false;

	while (!found) {
		// find the next entry to be returned
		while (sem != NULL && index < newIndex) {
			sem = (sem_entry*)list_get_next_item(&team->sem_list, sem);
			index++;
		}
		if (sem == NULL)
			return B_BAD_VALUE;

		GRAB_SEM_LOCK(*sem);

		// Skip entries that were deleted (id == -1) or are no longer
		// owned by this team; only report live, owned semaphores.
		if (sem->id != -1 && sem->u.used.owner == team->id) {
			// found one!
			fill_sem_info(sem, info, size);
			newIndex = index + 1;
			found = true;
		} else
			newIndex++;

		RELEASE_SEM_LOCK(*sem);
	}

	// NOTE(review): unreachable — the loop above only exits with
	// found == true (every other path returns early).
	if (!found)
		return B_BAD_VALUE;

	*_cookie = newIndex;
	return B_OK;
}
// TODO: split for commands and comunication (ACL & SCO)
/*!	Drains the per-type TX queues of \a bdev (commands, ACL; SCO is not
	implemented yet), submitting each buffer to the USB layer.

	Concurrency protocol: TEST_AND_SET on the PROCESSING bit elects a
	single draining thread.  Any other caller that finds PROCESSING set
	just raises the SENDING bit, which makes the draining thread run one
	more do/while pass before clearing PROCESSING — so no enqueued work is
	missed without holding a lock across the whole drain.

	\param bdev	The Bluetooth USB device whose nbuffersTx queues are
		processed.
*/
void
sched_tx_processing(bt_usb_dev* bdev)
{
	net_buffer* nbuf;
	snet_buffer* snbuf;
	status_t err;

	TRACE("%s: (%p)\n", __func__, bdev);

	if (!TEST_AND_SET(&bdev->state, PROCESSING)) {
		// We are not processing in another thread so... START!!
		do {
			/* Do while this bit is on... so someone should set it before we
			 * stop the iterations */
			bdev->state = CLEAR_BIT(bdev->state, SENDING);

			// check Commands
#ifdef EMPTY_COMMAND_QUEUE
			while (!list_is_empty(&bdev->nbuffersTx[BT_COMMAND])) {
#else
			if (!list_is_empty(&bdev->nbuffersTx[BT_COMMAND])) {
#endif
				snbuf = (snet_buffer*)
					list_remove_head_item(&bdev->nbuffersTx[BT_COMMAND]);
				err = submit_tx_command(bdev, snbuf);
				if (err != B_OK) {
					// re-head it
					// NOTE(review): inserting before the current first item
					// puts the buffer back at the head; with the while-loop
					// variant this retries the same buffer immediately —
					// presumably submit failures are transient, otherwise
					// this busy-loops.  Confirm.
					list_insert_item_before(&bdev->nbuffersTx[BT_COMMAND],
						list_get_first_item(&bdev->nbuffersTx[BT_COMMAND]),
						snbuf);
				}
			}

			// check ACL
#define EMPTY_ACL_QUEUE
#ifdef EMPTY_ACL_QUEUE
			while (!list_is_empty(&bdev->nbuffersTx[BT_ACL])) {
#else
			if (!list_is_empty(&bdev->nbuffersTx[BT_ACL])) {
#endif
				nbuf = (net_buffer*)
					list_remove_head_item(&bdev->nbuffersTx[BT_ACL]);
				err = submit_tx_acl(bdev, nbuf);
				if (err != B_OK) {
					// re-head it (see NOTE above on the command queue:
					// the while-loop form retries failed buffers
					// immediately)
					list_insert_item_before(&bdev->nbuffersTx[BT_ACL],
						list_get_first_item(&bdev->nbuffersTx[BT_ACL]),
						nbuf);
				}
			}

			if (!list_is_empty(&bdev->nbuffersTx[BT_SCO])) {
				// TODO to be implemented
			}

		} while (GET_BIT(bdev->state, SENDING));

		bdev->state = CLEAR_BIT(bdev->state, PROCESSING);
	} else {
		// We are processing so MARK that we need to still go on with that
		bdev->state = SET_BIT(bdev->state, SENDING);
	}
}


#if 0
// DEPRECATED
/*!	Hands a received frame up the stack: events go to the Bluetooth core
	(the snet_buffer is then recycled), everything else is passed to the
	net_device layer as a net_buffer.

	NOTE(review): err is initialized to B_ERROR and never updated, so this
	always reported failure — one reason the function is #if 0'd out.
*/
status_t
post_packet_up(bt_usb_dev* bdev, bt_packet_t type, void* buf)
{
	status_t err = B_ERROR;

	debugf("Frame up type=%d\n", type);

	if (type == BT_EVENT) {
		snet_buffer* snbuf = (snet_buffer*)buf;
		btCoreData->PostEvent(bdev->ndev, snb_get(snbuf),
			(size_t)snb_size(snbuf));
		// Park the snet_buffer for reuse instead of freeing it.
		snb_park(&bdev->snetBufferRecycleTrash, snbuf);
		debugf("to btDataCore len=%d\n", snb_size(snbuf));
	} else {
		net_buffer* nbuf = (net_buffer*) buf;
		// No need to free the buffer at allocation is gonna be reused
		btDevices->receive_data(bdev->ndev, &nbuf);
		TRACE("to net_device\n");
	}

	return err;
}