/*
 * btsock_thread_create - allocate a poll-thread slot and spawn its thread.
 *
 * At least one of the two callbacks must be supplied.  Returns the slot
 * handle (>= 0) on success, or -1 when no slot is free or the thread
 * cannot be created.
 *
 * FIX: removed the unused local `int ret = FALSE;` (dead variable; the
 * function returns the slot handle `h`, never `ret`).
 */
int btsock_thread_create(btsock_signaled_cb callback, btsock_cmd_cb cmd_callback)
{
    asrt(callback || cmd_callback);
    lock_slot(&thread_slot_lock);
    int h = alloc_thread_slot();
    unlock_slot(&thread_slot_lock);
    APPL_TRACE_DEBUG1("alloc_thread_slot ret:%d", h);
    if(h >= 0)
    {
        init_poll(h);
        if((ts[h].thread_id = create_thread(sock_poll_thread, (void*)h)) != -1)
        {
            APPL_TRACE_DEBUG2("h:%d, thread id:%d", h, ts[h].thread_id);
            ts[h].callback = callback;
            ts[h].cmd_callback = cmd_callback;
        }
        else
        {
            /* thread creation failed: give the slot back and report failure */
            free_thread_slot(h);
            h = -1;
        }
    }
    return h;
}
/*
 * An incoming RFCOMM connection landed on listening server slot `id`.
 * Create an accept slot for the new link, hand its fd to the app via the
 * connect signal, and keep monitoring the (still listening) server slot.
 * Returns the server slot id on success, 0 otherwise.
 */
static uint32_t on_srv_rfc_connect(tBTA_JV_RFCOMM_SRV_OPEN *p_open, uint32_t id)
{
    uint32_t new_listen_slot_id = 0;
    lock_slot(&slot_lock);
    rfc_slot_t *listener = find_rfc_slot_by_id(id);
    if(listener != NULL)
    {
        rfc_slot_t *accepted = create_srv_accept_rfc_slot(listener,
                (const bt_bdaddr_t*)p_open->rem_bda, p_open->handle,
                p_open->new_listen_handle);
        if(accepted != NULL)
        {
            /* watch the server fd for exceptions, the accepted fd for app data */
            btsock_thread_add_fd(pth, listener->fd, BTSOCK_RFCOMM,
                                 SOCK_THREAD_FD_EXCEPTION, listener->id);
            btsock_thread_add_fd(pth, accepted->fd, BTSOCK_RFCOMM,
                                 SOCK_THREAD_FD_RD, accepted->id);
            APPL_TRACE_DEBUG1("sending connect signal & app fd:%dto app server to accept() the connection", accepted->app_fd);
            APPL_TRACE_DEBUG2("server fd:%d, scn:%d", listener->fd, listener->scn);
            send_app_connect_signal(listener->fd, &accepted->addr,
                                    listener->scn, 0, accepted->app_fd);
            accepted->app_fd = -1; /* fd closed after being sent to the app */
            new_listen_slot_id = listener->id;
        }
    }
    unlock_slot(&slot_lock);
    return new_listen_slot_id;
}
/*
 * A stack write for RFCOMM slot `id` completed.  Unless the link is
 * flow-controlled, re-arm read polling on the app socket pair so further
 * outgoing data gets picked up.
 */
static void on_rfc_write_done(tBTA_JV_RFCOMM_WRITE *p, uint32_t id)
{
    lock_slot(&slot_lock);
    rfc_slot_t *slot = find_rfc_slot_by_id(id);
    if(slot != NULL && slot->f.outgoing_congest == 0)
    {
        /* monitor the fd for any further outgoing data */
        btsock_thread_add_fd(pth, slot->fd, BTSOCK_RFCOMM, SOCK_THREAD_FD_RD, slot->id);
    }
    unlock_slot(&slot_lock);
}
/*
 * A stack write for L2CAP slot `id` completed.  Unless the link is
 * flow-controlled, re-arm read polling on the app socket pair so further
 * outgoing data gets picked up.
 */
static void on_l2c_write_done(tBTA_JV_L2CAP_WRITE *p, uint32_t id)
{
    lock_slot(&slot_lock);
    l2c_slot_t *slot = find_l2c_slot_by_id(id);
    if(slot != NULL && slot->f.outgoing_congest == 0)
    {
        /* monitor the fd for any further outgoing data */
        btsock_thread_add_fd(pth, slot->fd, BTSOCK_L2CAP, SOCK_THREAD_FD_RD, slot->id);
    }
    unlock_slot(&slot_lock);
}
/*
 * Stack flow-control state changed for L2CAP slot `id`.  Record the new
 * congestion flag; once congestion clears, resume polling the app fd for
 * outgoing data.
 */
static void on_l2c_outgoing_congest(tBTA_JV_L2CAP_CONG *p, uint32_t id)
{
    lock_slot(&slot_lock);
    l2c_slot_t *slot = find_l2c_slot_by_id(id);
    if(slot != NULL)
    {
        slot->f.outgoing_congest = (p->cong != 0);
        /* uncongested again: monitor the fd for any outgoing data */
        if(!slot->f.outgoing_congest)
            btsock_thread_add_fd(pth, slot->fd, BTSOCK_L2CAP, SOCK_THREAD_FD_RD, slot->id);
    }
    unlock_slot(&slot_lock);
}
/*
 * Stack flow-control state changed for RFCOMM slot `id`.  Record the new
 * congestion flag; once congestion clears, resume polling the app fd for
 * outgoing data.
 */
static void on_rfc_outgoing_congest(tBTA_JV_RFCOMM_CONG *p, uint32_t id)
{
    lock_slot(&slot_lock);
    rfc_slot_t *slot = find_rfc_slot_by_id(id);
    if(slot != NULL)
    {
        slot->f.outgoing_congest = (p->cong != 0);
        /* uncongested again: monitor the fd for any outgoing data */
        if(!slot->f.outgoing_congest)
            btsock_thread_add_fd(pth, slot->fd, BTSOCK_RFCOMM, SOCK_THREAD_FD_RD, slot->id);
    }
    unlock_slot(&slot_lock);
}
/*
 * Tear down the L2CAP socket layer: stop the poll thread first (so no
 * callback races slot teardown), then free every in-use slot.
 */
void btsock_l2c_cleanup()
{
    int poll_handle = pth;
    pth = -1; /* invalidate the global handle before exiting the thread */
    btsock_thread_exit(poll_handle);
    lock_slot(&slot_lock);
    int i;
    for(i = 0; i < MAX_L2C_SOCK_CHANNEL; i++)
    {
        if(l2c_slots[i].in_use)
            cleanup_l2c_slot(&l2c_slots[i]);
    }
    unlock_slot(&slot_lock);
}
/*
 * Tear down the RFCOMM socket layer: stop the poll thread first (so no
 * callback races slot teardown), then free every allocated slot.
 */
void btsock_rfc_cleanup()
{
    int poll_handle = pth;
    pth = -1; /* invalidate the global handle before exiting the thread */
    btsock_thread_exit(poll_handle);
    lock_slot(&slot_lock);
    int i;
    for(i = 0; i < MAX_RFC_CHANNEL; i++)
    {
        /* a non-zero id marks an allocated slot */
        if(rfc_slots[i].id)
            cleanup_rfc_slot(&rfc_slots[i]);
    }
    unlock_slot(&slot_lock);
}
/*
 * The stack closed the RFCOMM link for slot `id`: give the scn back and
 * tear the slot down.
 */
static void on_rfc_close(tBTA_JV_RFCOMM_CLOSE * p_close, uint32_t id)
{
    lock_slot(&slot_lock);
    rfc_slot_t *slot = find_rfc_slot_by_id(id);
    if(slot != NULL)
    {
        APPL_TRACE_DEBUG4("on_rfc_close, slot id:%d, fd:%d, rfc scn:%d, server:%d",
                          slot->id, slot->fd, slot->scn, slot->f.server);
        free_rfc_slot_scn(slot);
        /* rfc_handle already closed when receiving rfcomm close event from stack */
        slot->f.connected = FALSE;
        cleanup_rfc_slot(slot);
    }
    unlock_slot(&slot_lock);
}
/*
 * Client-side RFCOMM connect was initiated: on success remember the stack
 * handle, otherwise tear the slot down.
 */
static void on_cl_rfc_init(tBTA_JV_RFCOMM_CL_INIT *p_init, uint32_t id)
{
    lock_slot(&slot_lock);
    rfc_slot_t *slot = find_rfc_slot_by_id(id);
    if(slot != NULL)
    {
        if(p_init->status == BTA_JV_SUCCESS)
            slot->rfc_handle = p_init->handle;
        else
            cleanup_rfc_slot(slot);
    }
    unlock_slot(&slot_lock);
}
/*
 * The stack closed the L2CAP link for slot `id`: give the psm back and
 * tear the slot down.
 */
static void on_l2c_close(tBTA_JV_L2CAP_CLOSE * p_close, uint32_t id)
{
    lock_slot(&slot_lock);
    l2c_slot_t *slot = find_l2c_slot_by_id(id);
    if(slot != NULL)
    {
        APPL_TRACE_DEBUG("on_l2c_close, slot id:%d, fd:%d, l2c psm:%d, server:%d",
                         slot->id, slot->fd, slot->psm, slot->f.server);
        free_l2c_slot_psm(slot);
        /* l2c_handle already closed when receiving l2cap close event from stack */
        slot->f.connected = FALSE;
        cleanup_l2c_slot(slot);
    }
    unlock_slot(&slot_lock);
}
/*
 * Client-side L2CAP connect was initiated: on success remember the stack
 * handle, otherwise tear the slot down.
 */
static void on_cl_l2c_init(tBTA_JV_L2CAP_CL_INIT *p_init, uint32_t id)
{
    lock_slot(&slot_lock);
    l2c_slot_t *slot = find_l2c_slot_by_id(id);
    if(slot != NULL)
    {
        if(p_init->status == BTA_JV_SUCCESS)
        {
            slot->l2c_handle = p_init->handle;
            APPL_TRACE_DEBUG("on_cl_l2c_init ls->l2c_handle %d", slot->l2c_handle);
        }
        else
            cleanup_l2c_slot(slot);
    }
    unlock_slot(&slot_lock);
}
bt_status_t btsock_l2c_listen(const char* service_name, const uint8_t* service_uuid, int channel, int* sock_fd, int flags) { int status = BT_STATUS_FAIL; APPL_TRACE_DEBUG("btsock_l2c_listen, service_name:%s", service_name); /* TODO find the available psm list for obex */ if(sock_fd == NULL || (service_uuid == NULL)) { APPL_TRACE_ERROR("invalid sock_fd:%p, uuid:%p", sock_fd, service_uuid); return BT_STATUS_PARM_INVALID; } *sock_fd = -1; if(!is_init_done()) return BT_STATUS_NOT_READY; /* validate it for FTP and OPP */ //Check the service_uuid. overwrite the channel # if reserved int reserved_channel = get_reserved_l2c_channel(service_uuid); if(reserved_channel > 0) { channel = reserved_channel; } else { return BT_STATUS_FAIL; } lock_slot(&slot_lock); l2c_slot_t* ls = alloc_l2c_slot(NULL, service_name, service_uuid, channel, flags, TRUE); if(ls) { APPL_TRACE_DEBUG("BTA_JvCreateRecordByUser:%s", service_name); BTA_JvCreateRecordByUser((void *)(ls->id)); APPL_TRACE_DEBUG("BTA_JvCreateRecordByUser userdata :%d", service_name); *sock_fd = ls->app_fd; ls->app_fd = -1; //the fd ownelship is transferred to app status = BT_STATUS_SUCCESS; btsock_thread_add_fd(pth, ls->fd, BTSOCK_L2CAP, SOCK_THREAD_FD_EXCEPTION, ls->id); } unlock_slot(&slot_lock); return status; }
/*
 * RFCOMM server listen started: record the stack handle and push the scn
 * to the app; tear the slot down on any failure (including an app side
 * that already went away).
 */
static void on_srv_rfc_listen_started(tBTA_JV_RFCOMM_START *p_start, uint32_t id)
{
    lock_slot(&slot_lock);
    rfc_slot_t *slot = find_rfc_slot_by_id(id);
    if(slot != NULL)
    {
        if(p_start->status != BTA_JV_SUCCESS)
        {
            cleanup_rfc_slot(slot);
        }
        else
        {
            slot->rfc_handle = p_start->handle;
            if(!send_app_scn(slot))
            {
                /* app socket closed underneath us */
                APPL_TRACE_DEBUG1("send_app_scn() failed, close rs->id:%d", slot->id);
                cleanup_rfc_slot(slot);
            }
        }
    }
    unlock_slot(&slot_lock);
}
bt_status_t btsock_rfc_listen(const char* service_name, const uint8_t* service_uuid, int channel, int* sock_fd, int flags) { APPL_TRACE_DEBUG1("btsock_rfc_listen, service_name:%s", service_name); if(sock_fd == NULL || (service_uuid == NULL && (channel < 1 || channel > 30))) { APPL_TRACE_ERROR3("invalid rfc channel:%d or sock_fd:%p, uuid:%p", channel, sock_fd, service_uuid); return BT_STATUS_PARM_INVALID; } *sock_fd = -1; if(!is_init_done()) return BT_STATUS_NOT_READY; if(is_uuid_empty(service_uuid)) service_uuid = UUID_SPP; //use serial port profile to listen to specified channel else { //Check the service_uuid. overwrite the channel # if reserved int reserved_channel = get_reserved_rfc_channel(service_uuid); if(reserved_channel > 0) { channel = reserved_channel; } } int status = BT_STATUS_FAIL; lock_slot(&slot_lock); rfc_slot_t* rs = alloc_rfc_slot(NULL, service_name, service_uuid, channel, flags, TRUE); if(rs) { APPL_TRACE_DEBUG1("BTA_JvCreateRecordByUser:%s", service_name); BTA_JvCreateRecordByUser((void *)rs->id); *sock_fd = rs->app_fd; rs->app_fd = -1; //the fd ownership is transferred to app status = BT_STATUS_SUCCESS; btsock_thread_add_fd(pth, rs->fd, BTSOCK_RFCOMM, SOCK_THREAD_FD_EXCEPTION, rs->id); } unlock_slot(&slot_lock); return status; }
int btsock_thread_exit(int h) { if(h < 0 || h >= MAX_THREAD) { APPL_TRACE_ERROR1("invalid bt thread handle:%d", h); return FALSE; } if(ts[h].cmd_fdw == -1) { APPL_TRACE_ERROR0("cmd socket is not created"); return FALSE; } sock_cmd_t cmd = {CMD_EXIT, 0, 0, 0, 0}; if(send(ts[h].cmd_fdw, &cmd, sizeof(cmd), 0) == sizeof(cmd)) { pthread_join(ts[h].thread_id, 0); lock_slot(&thread_slot_lock); free_thread_slot(h); unlock_slot(&thread_slot_lock); return TRUE; } return FALSE; }
/*
 * L2CAP server listen started: record the stack handle and push the psm
 * to the app; tear the slot down on any failure (including an app side
 * that already went away).
 */
static void on_srv_l2c_listen_started(tBTA_JV_L2CAP_START *p_start, uint32_t id)
{
    lock_slot(&slot_lock);
    l2c_slot_t *slot = find_l2c_slot_by_id(id);
    if(slot != NULL)
    {
        if(p_start->status != BTA_JV_SUCCESS)
        {
            cleanup_l2c_slot(slot);
        }
        else
        {
            slot->l2c_handle = p_start->handle;
            APPL_TRACE_DEBUG("on_srv_l2c_listen_started ls->l2c_handle %d", slot->l2c_handle);
            if(!send_app_psm(slot))
            {
                /* app socket closed underneath us */
                APPL_TRACE_DEBUG("send_app_psm() failed, close ls->id:%d", slot->id);
                cleanup_l2c_slot(slot);
            }
        }
    }
    unlock_slot(&slot_lock);
}
/*
 * Client-side L2CAP connect finished.  On success, record the peer
 * address and signal the app that the socket is connected; on failure
 * (or if the connect-signal write fails) the slot is cleaned up or the
 * failure logged.
 */
static void on_cli_l2c_connect(tBTA_JV_L2CAP_OPEN *p_open, uint32_t id)
{
    lock_slot(&slot_lock);
    l2c_slot_t *slot = find_l2c_slot_by_id(id);
    if(slot != NULL)
    {
        if(p_open->status == BTA_JV_SUCCESS)
        {
            bd_copy(slot->addr.address, p_open->rem_bda, 0);
            /* notify the app that l2c is connected */
            APPL_TRACE_DEBUG("call send_app_connect_signal, slot id:%d, fd:%d, l2c psm:%d, server:%d",
                             slot->id, slot->fd, slot->psm, slot->f.server);
            if(send_app_connect_signal(slot->fd, &slot->addr, slot->psm, 0, -1))
            {
                /* the poll thread now watches the socketpair for app writes */
                APPL_TRACE_DEBUG("on_l2c_connect_ind, connect signal sent, slot id:%d, l2c psm:%d, server:%d",
                                 slot->id, slot->psm, slot->f.server);
                slot->f.connected = TRUE;
            }
            else
                APPL_TRACE_ERROR("send_app_connect_signal failed");
        }
        else
            cleanup_l2c_slot(slot);
    }
    unlock_slot(&slot_lock);
}
/*
 * Client-side RFCOMM connect finished.  On success, resolve the port
 * handle, record the peer address and signal the app that the socket is
 * connected; on failure the slot is cleaned up or the failure logged.
 */
static void on_cli_rfc_connect(tBTA_JV_RFCOMM_OPEN *p_open, uint32_t id)
{
    lock_slot(&slot_lock);
    rfc_slot_t *slot = find_rfc_slot_by_id(id);
    if(slot != NULL)
    {
        if(p_open->status == BTA_JV_SUCCESS)
        {
            slot->rfc_port_handle = BTA_JvRfcommGetPortHdl(p_open->handle);
            bd_copy(slot->addr.address, p_open->rem_bda, 0);
            /* notify the app that rfc is connected */
            APPL_TRACE_DEBUG4("call send_app_connect_signal, slot id:%d, fd:%d, rfc scn:%d, server:%d",
                              slot->id, slot->fd, slot->scn, slot->f.server);
            if(send_app_connect_signal(slot->fd, &slot->addr, slot->scn, 0, -1))
            {
                /* the poll thread now watches the socketpair for app writes */
                APPL_TRACE_DEBUG3("on_rfc_connect_ind, connect signal sent, slot id:%d, rfc scn:%d, server:%d",
                                  slot->id, slot->scn, slot->f.server);
                slot->f.connected = TRUE;
            }
            else
                APPL_TRACE_ERROR0("send_app_connect_signal failed");
        }
        else
            cleanup_rfc_slot(slot);
    }
    unlock_slot(&slot_lock);
}
bt_status_t btsock_l2c_connect(const bt_bdaddr_t *bd_addr, const uint8_t* service_uuid, int channel, int* sock_fd, int flags) { if(sock_fd == NULL || (service_uuid == NULL)) { APPL_TRACE_ERROR("invalid sock_fd:%p, uuid:%p", sock_fd, service_uuid); return BT_STATUS_PARM_INVALID; } *sock_fd = -1; if(!is_init_done()) return BT_STATUS_NOT_READY; int status = BT_STATUS_FAIL; lock_slot(&slot_lock); l2c_slot_t* ls = alloc_l2c_slot(bd_addr, NULL, service_uuid, channel, flags, FALSE); if(ls) { ls->f.client = TRUE; if(is_uuid_empty(service_uuid)) { APPL_TRACE_DEBUG("connecting to l2cap channel:%d without service discovery", channel); if(BTA_JvL2capConnect(ls->security, ls->role, ls->psm, 672, ls->addr.address, l2cap_cback, (void*)ls->id) == BTA_JV_SUCCESS) { if(send_app_psm(ls)) { btsock_thread_add_fd(pth, ls->fd, BTSOCK_L2CAP, SOCK_THREAD_FD_RD, ls->id); *sock_fd = ls->app_fd; ls->app_fd = -1; //the fd ownelship is transferred to app status = BT_STATUS_SUCCESS; } else cleanup_l2c_slot(ls); } else cleanup_l2c_slot(ls); } else { tSDP_UUID sdp_uuid; sdp_uuid.len = 16; memcpy(sdp_uuid.uu.uuid128, service_uuid, sizeof(sdp_uuid.uu.uuid128)); logu("service_uuid", service_uuid); *sock_fd = ls->app_fd; ls->app_fd = -1; //the fd ownelship is transferred to app status = BT_STATUS_SUCCESS; l2c_slot_t* ls_doing_sdp = find_l2c_slot_requesting_sdp(); if(ls_doing_sdp == NULL) { BTA_JvStartDiscovery((UINT8*)bd_addr->address, 1, &sdp_uuid, (void*)(ls->id)); ls->f.pending_sdp_request = FALSE; ls->f.doing_sdp_request = TRUE; } else { ls->f.pending_sdp_request = TRUE; ls->f.doing_sdp_request = FALSE; } btsock_thread_add_fd(pth, ls->fd, BTSOCK_L2CAP, SOCK_THREAD_FD_RD, ls->id); } } unlock_slot(&slot_lock); return status; }
bt_status_t btsock_rfc_connect(const bt_bdaddr_t *bd_addr, const uint8_t* service_uuid, int channel, int* sock_fd, int flags) { if(sock_fd == NULL || (service_uuid == NULL && (channel < 1 || channel > 30))) { APPL_TRACE_ERROR3("invalid rfc channel:%d or sock_fd:%p, uuid:%p", channel, sock_fd, service_uuid); return BT_STATUS_PARM_INVALID; } *sock_fd = -1; if(!is_init_done()) return BT_STATUS_NOT_READY; int status = BT_STATUS_FAIL; lock_slot(&slot_lock); rfc_slot_t* rs = alloc_rfc_slot(bd_addr, NULL, service_uuid, channel, flags, FALSE); if(rs) { if(is_uuid_empty(service_uuid)) { APPL_TRACE_DEBUG1("connecting to rfcomm channel:%d without service discovery", channel); if(BTA_JvRfcommConnect(rs->security, rs->role, rs->scn, rs->addr.address, rfcomm_cback, (void*)rs->id) == BTA_JV_SUCCESS) { if(send_app_scn(rs)) { btsock_thread_add_fd(pth, rs->fd, BTSOCK_RFCOMM, SOCK_THREAD_FD_RD, rs->id); *sock_fd = rs->app_fd; rs->app_fd = -1; //the fd ownership is transferred to app status = BT_STATUS_SUCCESS; } else cleanup_rfc_slot(rs); } else cleanup_rfc_slot(rs); } else { tSDP_UUID sdp_uuid; sdp_uuid.len = 16; memcpy(sdp_uuid.uu.uuid128, service_uuid, sizeof(sdp_uuid.uu.uuid128)); logu("service_uuid", service_uuid); *sock_fd = rs->app_fd; rs->app_fd = -1; //the fd ownership is transferred to app status = BT_STATUS_SUCCESS; rfc_slot_t* rs_doing_sdp = find_rfc_slot_requesting_sdp(); if(rs_doing_sdp == NULL) { BTA_JvStartDiscovery((UINT8*)bd_addr->address, 1, &sdp_uuid, (void*)rs->id); rs->f.pending_sdp_request = FALSE; rs->f.doing_sdp_request = TRUE; } else { rs->f.pending_sdp_request = TRUE; rs->f.doing_sdp_request = FALSE; } btsock_thread_add_fd(pth, rs->fd, BTSOCK_RFCOMM, SOCK_THREAD_FD_RD, rs->id); } } unlock_slot(&slot_lock); return status; }
/*
 * BTA JV "device manager" callback for the RFCOMM socket layer; runs on
 * the BTA stack thread.  `user_data` carries the rfc slot id.
 *
 * NOTE(review): this chunk is truncated — the remaining switch cases and
 * the closing braces of the switch/function lie outside the visible range.
 */
static void jv_dm_cback(tBTA_JV_EVT event, tBTA_JV *p_data, void *user_data)
{
    uint32_t id = (uint32_t)user_data;
    APPL_TRACE_DEBUG2("jv_dm_cback: event:%d, slot id:%d", event, id);
    switch(event)
    {
        case BTA_JV_CREATE_RECORD_EVT:
        {
            /* SDP record created: the server can now start listening. */
            lock_slot(&slot_lock);
            rfc_slot_t* rs = find_rfc_slot_by_id(id);
            if(rs && create_server_sdp_record(rs))
            {
                //now start the rfcomm server after sdp & channel # assigned
                BTA_JvRfcommStartServer(rs->security, rs->role, rs->scn,
                        MAX_RFC_SESSION, rfcomm_cback, (void*)rs->id);
            }
            else if(rs)
            {
                APPL_TRACE_ERROR1("jv_dm_cback: cannot start server, slot found:%p", rs);
                cleanup_rfc_slot(rs);
            }
            unlock_slot(&slot_lock);
            break;
        }
        case BTA_JV_DISCOVERY_COMP_EVT:
        {
            rfc_slot_t* rs = NULL;
            lock_slot(&slot_lock);
            if(p_data->disc_comp.status == BTA_JV_SUCCESS && p_data->disc_comp.scn)
            {
                APPL_TRACE_DEBUG3("BTA_JV_DISCOVERY_COMP_EVT, slot id:%d, status:%d, scn:%d",
                        id, p_data->disc_comp.status, p_data->disc_comp.scn);
                rs = find_rfc_slot_by_id(id);
                if(rs && rs->f.doing_sdp_request)
                {
                    /* SDP found the peer's scn: connect with it now. */
                    if(BTA_JvRfcommConnect(rs->security, rs->role, p_data->disc_comp.scn,
                            rs->addr.address, rfcomm_cback, (void*)rs->id) == BTA_JV_SUCCESS)
                    {
                        rs->scn = p_data->disc_comp.scn;
                        rs->f.doing_sdp_request = FALSE;
                        if(!send_app_scn(rs))
                            cleanup_rfc_slot(rs);
                    }
                    else cleanup_rfc_slot(rs);
                }
                else if(rs)
                {
                    APPL_TRACE_ERROR3("DISCOVERY_COMP_EVT no pending sdp request, slot id:%d, \
                         flag sdp pending:%d, flag sdp doing:%d", id,
                            rs->f.pending_sdp_request, rs->f.doing_sdp_request);
                }
            }
            else
            {
                APPL_TRACE_ERROR3("DISCOVERY_COMP_EVT slot id:%d, failed to find channle, \
                      status:%d, scn:%d", id, p_data->disc_comp.status, p_data->disc_comp.scn);
                rs = find_rfc_slot_by_id(id);
                if(rs)
                    cleanup_rfc_slot(rs);
            }
            /* Only one SDP request runs at a time: kick off the next queued one. */
            rs = find_rfc_slot_by_pending_sdp();
            if(rs)
            {
                APPL_TRACE_DEBUG0("BTA_JV_DISCOVERY_COMP_EVT, start another pending scn sdp request");
                tSDP_UUID sdp_uuid;
                sdp_uuid.len = 16;
                memcpy(sdp_uuid.uu.uuid128, rs->service_uuid, sizeof(sdp_uuid.uu.uuid128));
                BTA_JvStartDiscovery((UINT8*)rs->addr.address, 1, &sdp_uuid,
                        (void*)rs->id);
                rs->f.pending_sdp_request = FALSE;
                rs->f.doing_sdp_request = TRUE;
            }
            unlock_slot(&slot_lock);
            break;
        }
/*
 * BTA JV "device manager" callback for the L2CAP socket layer; runs on
 * the BTA stack thread.  `user_data` carries the l2c slot id.
 *
 * NOTE(review): this chunk is truncated — the remaining switch cases and
 * the closing braces of the switch/function lie outside the visible range.
 * Also note a second `static void jv_dm_cback(...)` appears earlier in this
 * extract; presumably the two come from different translation units
 * (rfc vs l2cap) — confirm before merging into one file.
 */
static void jv_dm_cback(tBTA_JV_EVT event, tBTA_JV *p_data, void *user_data)
{
    uint32_t id = (uint32_t)user_data;
    APPL_TRACE_DEBUG("jv_dm_cback: event:%d, slot id:%d", event, id);
    switch(event)
    {
        case BTA_JV_CREATE_RECORD_EVT:
        {
            /* SDP record created: the server can now start listening. */
            lock_slot(&slot_lock);
            l2c_slot_t* ls = find_l2c_slot_by_id(id);
            if(ls && create_server_sdp_record(ls))
            {
                //now start the l2cap server after sdp & channel # assigned
                BTA_JvL2capStartServer(ls->security, ls->role, ls->psm, 672,
                        l2cap_cback, (void*)ls->id);
            }
            else if(ls)
            {
                APPL_TRACE_ERROR("jv_dm_cback: cannot start server, slot found:%p", ls);
                cleanup_l2c_slot(ls);
            }
            unlock_slot(&slot_lock);
            break;
        }
        case BTA_JV_DISCOVERY_COMP_EVT:
        {
            l2c_slot_t* ls = NULL;
            lock_slot(&slot_lock);
            if(p_data->disc_comp.status == BTA_JV_SUCCESS && p_data->disc_comp.psm)
            {
                APPL_TRACE_DEBUG("BTA_JV_DISCOVERY_COMP_EVT, slot id:%d, status:%d, psm:%d",
                        id, p_data->disc_comp.status, p_data->disc_comp.psm);
                ls = find_l2c_slot_by_id(id);
                if(ls && ls->f.doing_sdp_request)
                {
                    /* SDP found the peer's psm: connect with it now. */
                    if(BTA_JvL2capConnect(ls->security, ls->role, p_data->disc_comp.psm, 672,
                            ls->addr.address, l2cap_cback, (void*)ls->id) == BTA_JV_SUCCESS)
                    {
                        ls->psm = p_data->disc_comp.psm;
                        ls->f.doing_sdp_request = FALSE;
                        if(!send_app_psm(ls))
                            cleanup_l2c_slot(ls);
                    }
                    else cleanup_l2c_slot(ls);
                }
                else if(ls)
                {
                    APPL_TRACE_ERROR("DISCOVERY_COMP_EVT no pending sdp request, slot id:%d, \
                         flag sdp pending:%d, flag sdp doing:%d", id,
                            ls->f.pending_sdp_request, ls->f.doing_sdp_request);
                }
            }
            else
            {
                APPL_TRACE_ERROR("DISCOVERY_COMP_EVT slot id:%d, failed to find channle, \
                      status:%d, psm:%d", id, p_data->disc_comp.status, p_data->disc_comp.psm);
                ls = find_l2c_slot_by_id(id);
                if(ls)
                    cleanup_l2c_slot(ls);
            }
            /* Only one SDP request runs at a time: kick off the next queued one. */
            ls = find_l2c_slot_by_pending_sdp();
            if(ls)
            {
                APPL_TRACE_DEBUG("BTA_JV_DISCOVERY_COMP_EVT, start another pending psm sdp request");
                tSDP_UUID sdp_uuid;
                sdp_uuid.len = 16;
                memcpy(sdp_uuid.uu.uuid128, ls->service_uuid, sizeof(sdp_uuid.uu.uuid128));
                BTA_JvStartDiscovery((UINT8*)ls->addr.address, 1, &sdp_uuid,
                        (void*)(ls->id));
                ls->f.pending_sdp_request = FALSE;
                ls->f.doing_sdp_request = TRUE;
            }
            unlock_slot(&slot_lock);
            break;
        }
/*
 * process_slot - serve the current request from a cache slot, refreshing
 * or creating the cachefile as needed.
 *
 * Returns 0 on success (including "served uncached after a cache error"),
 * or the errno-style value from print_slot() otherwise.
 */
static int process_slot(struct cache_slot *slot)
{
	int err;

	err = open_slot(slot);
	if (!err && slot->match) {
		/* Cache hit.  If the entry has expired, try to regenerate it
		 * under the lockfile before serving. */
		if (is_expired(slot)) {
			if (!lock_slot(slot)) {
				/* If the cachefile has been replaced between
				 * `open_slot` and `lock_slot`, we'll just
				 * serve the stale content from the original
				 * cachefile. This way we avoid pruning the
				 * newly generated slot. The same code-path
				 * is chosen if fill_slot() fails for some
				 * reason.
				 *
				 * TODO? check if the new slot contains the
				 * same key as the old one, since we would
				 * prefer to serve the newest content.
				 * This will require us to open yet another
				 * file-descriptor and read and compare the
				 * key from the new file, so for now we're
				 * lazy and just ignore the new file.
				 */
				if (is_modified(slot) || fill_slot(slot)) {
					/* keep serving the old fd; discard the lockfile */
					unlock_slot(slot, 0);
					close_lock(slot);
				} else {
					/* serve the freshly filled lockfile content */
					close_slot(slot);
					unlock_slot(slot, 1);
					slot->cache_fd = slot->lock_fd;
				}
			}
		}
		if ((err = print_slot(slot)) != 0) {
			cache_log("[cgit] error printing cache %s: %s (%d)\n",
				  slot->cache_name,
				  strerror(err),
				  err);
		}
		close_slot(slot);
		return err;
	}

	/* If the cache slot does not exist (or its key doesn't match the
	 * current key), lets try to create a new cache slot for this
	 * request. If this fails (for whatever reason), lets just generate
	 * the content without caching it and fool the caller to belive
	 * everything worked out (but print a warning on stdout).
	 */
	close_slot(slot);
	if ((err = lock_slot(slot)) != 0) {
		cache_log("[cgit] Unable to lock slot %s: %s (%d)\n",
			  slot->lock_name, strerror(err), err);
		slot->fn();
		return 0;
	}

	if ((err = fill_slot(slot)) != 0) {
		cache_log("[cgit] Unable to fill slot %s: %s (%d)\n",
			  slot->lock_name, strerror(err), err);
		unlock_slot(slot, 0);
		close_lock(slot);
		slot->fn();
		return 0;
	}
	// We've got a valid cache slot in the lock file, which
	// is about to replace the old cache slot. But if we
	// release the lockfile and then try to open the new cache
	// slot, we might get a race condition with a concurrent
	// writer for the same cache slot (with a different key).
	// Lets avoid such a race by just printing the content of
	// the lock file.
	slot->cache_fd = slot->lock_fd;
	unlock_slot(slot, 1);
	if ((err = print_slot(slot)) != 0) {
		cache_log("[cgit] error printing cache %s: %s (%d)\n",
			  slot->cache_name,
			  strerror(err),
			  err);
	}
	close_slot(slot);
	return err;
}
/* * vas_fault() * Process a fault within the given address space * * Returns 0 if the fault could be resolved, 1 if process needs to * receive an event. The HAT layer is expected to reliably hold * a translation added via hat_addtrans() until hat_deletetrans(). * A lost translation would cause the atl to hold multiple entries. */ vas_fault(void *vas, void *vaddr, int write) { struct pview *pv; struct pset *ps; struct perpage *pp; uint idx, pvidx; int error = 0; int wasvalid; /* * Easiest--no view matches address */ if ((pv = find_pview(vas, vaddr)) == 0) { return(1); } ASSERT_DEBUG(pv->p_valid, "vas_fault: pview !p_valid"); ps = pv->p_set; /* * Next easiest--trying to write to read-only view */ if (write && (pv->p_prot & PROT_RO)) { v_lock(&ps->p_lock, SPL0_SAME); return(1); } /* * Transfer from pset lock to page slot lock */ pvidx = btop((char *)vaddr - (char *)pv->p_vaddr); idx = pvidx + pv->p_off; pp = find_pp(ps, idx); lock_slot(ps, pp); /* * If the slot is bad, can't fill */ if (pp->pp_flags & PP_BAD) { error = 1; goto out; } /* * If slot is invalid, request it be filled. Otherwise just * add a reference. */ if (!(pp->pp_flags & PP_V)) { wasvalid = 0; if ((*(ps->p_ops->psop_fillslot))(ps, pp, idx)) { error = 1; goto out; } ASSERT(pp->pp_flags & PP_V, "vm_fault: lost the page"); } else { wasvalid = 1; ref_slot(ps, pp, idx); } /* * Break COW association when we write it */ if ((pp->pp_flags & PP_COW) && write) { /* * May or may not be there. If it is, remove * its reference from the per-page struct. 
*/ if (wasvalid) { if (pv->p_valid[pvidx]) { ASSERT(delete_atl(pp, pv, pvidx) == 0, "vas_fault: p_valid no atl"); pv->p_valid[pvidx] = 0; } deref_slot(ps, pp, idx); } cow_write(ps, pp, idx); ASSERT(pp->pp_flags & PP_V, "vm_fault: lost the page 2"); /* * If not writing to a COW association, then inhibit adding * the translation if it's already present (another thread * ran and brought it in for us, probably) */ } else if (pv->p_valid[pvidx]) { deref_slot(ps, pp, idx); goto out; } /* * With a valid slot, add a hat translation and tabulate * the entry with an atl. */ add_atl(pp, pv, pvidx, 0); hat_addtrans(pv, vaddr, pp->pp_pfn, pv->p_prot | ((pp->pp_flags & PP_COW) ? PROT_RO : 0)); ASSERT_DEBUG(pv->p_valid[pvidx] == 0, "vas_fault: p_valid went on"); pv->p_valid[pvidx] = 1; /* * Free the various things we hold and return */ out: unlock_slot(ps, pp); return(error); }