/** Start an async exchange on the loc session.
 *
 * @param iface Location service interface to choose
 *
 * @return New exchange.
 *
 */
async_exch_t *loc_exchange_begin(loc_interface_t iface)
{
	switch (iface) {
	case LOC_PORT_SUPPLIER:
		fibril_mutex_lock(&loc_supplier_mutex);

		if (loc_supplier_sess == NULL)
			loc_supplier_sess =
			    service_connect(EXCHANGE_SERIALIZE, SERVICE_LOC,
			    LOC_PORT_SUPPLIER, 0);

		fibril_mutex_unlock(&loc_supplier_mutex);

		if (loc_supplier_sess == NULL)
			return NULL;

		return async_exchange_begin(loc_supplier_sess);
	case LOC_PORT_CONSUMER:
		fibril_mutex_lock(&loc_consumer_mutex);

		if (loc_consumer_sess == NULL)
			loc_consumer_sess =
			    service_connect(EXCHANGE_SERIALIZE, SERVICE_LOC,
			    LOC_PORT_CONSUMER, 0);

		fibril_mutex_unlock(&loc_consumer_mutex);

		if (loc_consumer_sess == NULL)
			return NULL;

		return async_exchange_begin(loc_consumer_sess);
	default:
		return NULL;
	}
}
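/*
 * A minimal usage sketch (not part of the original source): every exchange
 * obtained from loc_exchange_begin() is assumed to be paired with the
 * loc_exchange_end() counterpart that libc provides alongside it.
 */
static int example_loc_call(void)
{
	async_exch_t *exch = loc_exchange_begin(LOC_PORT_CONSUMER);
	if (exch == NULL)
		return EIO;

	/* ... perform IPC calls on the exchange here ... */

	loc_exchange_end(exch);
	return EOK;
}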
static void udp_sock_close(udp_client_t *client, ipc_callid_t callid,
    ipc_call_t call)
{
	int socket_id;
	socket_core_t *sock_core;
	udp_sockdata_t *socket;
	int rc;

	/* Fixed copy-paste slip: this logged "tcp_sock_close()". */
	log_msg(LVL_DEBUG, "udp_sock_close()");
	socket_id = SOCKET_GET_SOCKET_ID(call);

	sock_core = socket_cores_find(&client->sockets, socket_id);
	if (sock_core == NULL) {
		async_answer_0(callid, ENOTSOCK);
		return;
	}

	socket = (udp_sockdata_t *)sock_core->specific_data;
	fibril_mutex_lock(&socket->lock);

	rc = socket_destroy(NULL, socket_id, &client->sockets, &gsock,
	    udp_free_sock_data);
	if (rc != EOK) {
		fibril_mutex_unlock(&socket->lock);
		async_answer_0(callid, rc);
		return;
	}

	fibril_mutex_unlock(&socket->lock);
	async_answer_0(callid, EOK);
}
/** Find connection structure for specified endpoint pair.
 *
 * A connection is uniquely identified by an endpoint pair. Look up our
 * connection map and return the connection structure based on the endpoint
 * pair. The connection reference count is bumped by one.
 *
 * @param epp Endpoint pair
 * @return Connection structure or NULL if not found.
 */
tcp_conn_t *tcp_conn_find_ref(inet_ep2_t *epp)
{
	int rc;
	void *arg;
	tcp_conn_t *conn;

	log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_conn_find_ref(%p)", epp);

	fibril_mutex_lock(&conn_list_lock);

	rc = amap_find_match(amap, epp, &arg);
	if (rc != EOK) {
		assert(rc == ENOENT);
		fibril_mutex_unlock(&conn_list_lock);
		return NULL;
	}

	conn = (tcp_conn_t *)arg;
	tcp_conn_addref(conn);

	fibril_mutex_unlock(&conn_list_lock);
	log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_conn_find_ref: got conn=%p",
	    conn);
	return conn;
}
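/*
 * Sketch of the intended calling pattern (not from the original source):
 * the reference taken by tcp_conn_find_ref() must be dropped with
 * tcp_conn_delref() once the caller is done with the connection.
 */
static void example_lookup(inet_ep2_t *epp)
{
	tcp_conn_t *conn = tcp_conn_find_ref(epp);
	if (conn == NULL)
		return;

	/* ... use conn under its own locking discipline ... */

	tcp_conn_delref(conn);
}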
/** Called when connection state changes. */
static void tcp_sock_cstate_cb(tcp_conn_t *conn, void *arg)
{
	tcp_conn_status_t cstatus;
	tcp_sock_lconn_t *lconn = (tcp_sock_lconn_t *)arg;
	tcp_sockdata_t *socket = lconn->socket;

	log_msg(LVL_DEBUG, "tcp_sock_cstate_cb()");
	fibril_mutex_lock(&socket->lock);
	assert(conn == lconn->conn);

	tcp_uc_status(conn, &cstatus);
	if (cstatus.cstate != st_established) {
		fibril_mutex_unlock(&socket->lock);
		return;
	}

	assert_link_not_used(&lconn->ready_list);
	list_append(&lconn->ready_list, &socket->ready);

	log_msg(LVL_DEBUG, "tcp_sock_cstate_cb(): notify accept");

	/* Push one accept notification to client's queue */
	tcp_sock_notify_aconn(socket->sock_core);
	fibril_mutex_unlock(&socket->lock);
}
/** RECEIVE user call */
tcp_error_t tcp_uc_receive(tcp_conn_t *conn, void *buf, size_t size,
    size_t *rcvd, xflags_t *xflags)
{
	size_t xfer_size;

	log_msg(LOG_DEFAULT, LVL_DEBUG, "%s: tcp_uc_receive()", conn->name);
	fibril_mutex_lock(&conn->lock);

	if (conn->cstate == st_closed) {
		fibril_mutex_unlock(&conn->lock);
		return TCP_ENOTEXIST;
	}

	/* Wait for data to become available */
	while (conn->rcv_buf_used == 0 && !conn->rcv_buf_fin && !conn->reset) {
		log_msg(LOG_DEFAULT, LVL_DEBUG,
		    "tcp_uc_receive() - wait for data");
		fibril_condvar_wait(&conn->rcv_buf_cv, &conn->lock);
	}

	if (conn->rcv_buf_used == 0) {
		*rcvd = 0;
		*xflags = 0;

		if (conn->rcv_buf_fin) {
			/* End of data, peer closed connection */
			fibril_mutex_unlock(&conn->lock);
			return TCP_ECLOSING;
		} else {
			/* Connection was reset */
			assert(conn->reset);
			fibril_mutex_unlock(&conn->lock);
			return TCP_ERESET;
		}
	}

	/* Copy data from receive buffer to user buffer */
	xfer_size = min(size, conn->rcv_buf_used);
	memcpy(buf, conn->rcv_buf, xfer_size);
	*rcvd = xfer_size;

	/* Remove data from receive buffer */
	memmove(conn->rcv_buf, conn->rcv_buf + xfer_size,
	    conn->rcv_buf_used - xfer_size);
	conn->rcv_buf_used -= xfer_size;
	conn->rcv_wnd += xfer_size;

	/* TODO */
	*xflags = 0;

	/* Send new size of receive window */
	tcp_tqueue_ctrl_seg(conn, CTL_ACK);

	log_msg(LOG_DEFAULT, LVL_DEBUG,
	    "%s: tcp_uc_receive() - returning %zu bytes",
	    conn->name, xfer_size);

	fibril_mutex_unlock(&conn->lock);
	return TCP_EOK;
}
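/*
 * A usage sketch (not in the original source): drain incoming data until
 * the call stops returning TCP_EOK. Only names from the listing are used.
 */
static void example_drain(tcp_conn_t *conn)
{
	char buf[512];
	size_t rcvd;
	xflags_t xflags;
	tcp_error_t rc;

	while (true) {
		rc = tcp_uc_receive(conn, buf, sizeof(buf), &rcvd, &xflags);
		if (rc != TCP_EOK)
			break;	/* TCP_ECLOSING, TCP_ERESET or TCP_ENOTEXIST */

		/* ... consume rcvd bytes from buf ... */
	}
}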
/** Allocate frame
 *
 * @param nic_data The NIC driver data
 * @param size Frame size in bytes
 * @return Pointer to the allocated frame on success, NULL otherwise
 */
nic_frame_t *nic_alloc_frame(nic_t *nic_data, size_t size)
{
	nic_frame_t *frame;

	fibril_mutex_lock(&nic_globals.lock);
	if (nic_globals.frame_cache_size > 0) {
		/* Reuse a cached frame structure. */
		link_t *first = list_first(&nic_globals.frame_cache);
		list_remove(first);
		nic_globals.frame_cache_size--;
		frame = list_get_instance(first, nic_frame_t, link);
		fibril_mutex_unlock(&nic_globals.lock);
	} else {
		fibril_mutex_unlock(&nic_globals.lock);
		frame = malloc(sizeof(nic_frame_t));
		if (!frame)
			return NULL;

		link_initialize(&frame->link);
	}

	frame->data = malloc(size);
	if (frame->data == NULL) {
		free(frame);
		return NULL;
	}

	frame->size = size;
	return frame;
}
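/*
 * Companion sketch (not from the original source): a frame obtained from
 * nic_alloc_frame() is assumed to be returned through a matching release
 * routine; nic_release_frame() is named here as an assumption about the
 * NIC framework, only to illustrate the cache round trip.
 */
static void example_frame_round_trip(nic_t *nic_data)
{
	nic_frame_t *frame = nic_alloc_frame(nic_data, 1518);
	if (frame == NULL)
		return;

	/* ... fill frame->data and hand it to the stack ... */

	nic_release_frame(nic_data, frame);
}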
/** Timer fibril.
 *
 * @param arg Timer
 */
static int fibril_timer_func(void *arg)
{
	fibril_timer_t *timer = (fibril_timer_t *) arg;
	int rc;

	fibril_mutex_lock(&timer->lock);

	while (true) {
		/*
		 * Sleep until the timer is set or about to be destroyed.
		 * (A redundant cleanup check inside this loop was removed;
		 * the loop condition already excludes fts_cleanup.)
		 */
		while (timer->state != fts_active &&
		    timer->state != fts_cleanup) {
			fibril_condvar_wait(&timer->cv, &timer->lock);
		}

		if (timer->state == fts_cleanup)
			break;

		rc = fibril_condvar_wait_timeout(&timer->cv, &timer->lock,
		    timer->delay);
		if (rc == ETIMEOUT) {
			timer->state = fts_fired;
			fibril_mutex_unlock(&timer->lock);
			timer->fun(timer->arg);
			fibril_mutex_lock(&timer->lock);
		}
	}

	fibril_mutex_unlock(&timer->lock);
	return 0;
}
/** Allocate clusters in FAT.
 *
 * This function will attempt to allocate the requested number of clusters
 * in the FAT. The FAT will be altered so that the allocated clusters form
 * an independent chain (i.e. a chain which does not belong to any file
 * yet).
 *
 * @param bs Buffer holding the boot sector of the file system.
 * @param service_id Service ID of the file system.
 * @param nclsts Number of clusters to allocate.
 * @param mcl Output parameter where the first cluster in the chain
 *            will be returned.
 * @param lcl Output parameter where the last cluster in the chain
 *            will be returned.
 *
 * @return EOK on success, a negative error code otherwise.
 */
int exfat_alloc_clusters(exfat_bs_t *bs, service_id_t service_id,
    unsigned nclsts, exfat_cluster_t *mcl, exfat_cluster_t *lcl)
{
	exfat_cluster_t *lifo;	/* stack for storing free cluster numbers */
	unsigned found = 0;	/* top of the free cluster number stack */
	exfat_cluster_t clst;
	int rc = EOK;

	lifo = (exfat_cluster_t *) malloc(nclsts * sizeof(exfat_cluster_t));
	if (!lifo)
		return ENOMEM;

	fibril_mutex_lock(&exfat_alloc_lock);
	for (clst = EXFAT_CLST_FIRST; clst < DATA_CNT(bs) + 2 &&
	    found < nclsts; clst++) {
		/* Need to rewrite because of multiple exfat_bitmap_get calls */
		if (exfat_bitmap_is_free(bs, service_id, clst) == EOK) {
			/*
			 * The cluster is free. Put it into our stack
			 * of found clusters and mark it as non-free.
			 */
			lifo[found] = clst;
			rc = exfat_set_cluster(bs, service_id, clst,
			    (found == 0) ? EXFAT_CLST_EOF : lifo[found - 1]);
			if (rc != EOK)
				goto exit_error;

			found++;
			rc = exfat_bitmap_set_cluster(bs, service_id, clst);
			if (rc != EOK)
				goto exit_error;
		}
	}

	if (rc == EOK && found == nclsts) {
		*mcl = lifo[found - 1];
		*lcl = lifo[0];
		free(lifo);
		fibril_mutex_unlock(&exfat_alloc_lock);
		return EOK;
	}

	rc = ENOSPC;

exit_error:
	/* If something went wrong, free the clusters found so far. */
	while (found--) {
		(void) exfat_bitmap_clear_cluster(bs, service_id,
		    lifo[found]);
		(void) exfat_set_cluster(bs, service_id, lifo[found], 0);
	}

	free(lifo);
	fibril_mutex_unlock(&exfat_alloc_lock);
	return rc;
}
/** SEND user call */
tcp_error_t tcp_uc_send(tcp_conn_t *conn, void *data, size_t size,
    xflags_t flags)
{
	size_t buf_free;
	size_t xfer_size;

	log_msg(LOG_DEFAULT, LVL_DEBUG, "%s: tcp_uc_send()", conn->name);
	fibril_mutex_lock(&conn->lock);

	if (conn->cstate == st_closed) {
		fibril_mutex_unlock(&conn->lock);
		return TCP_ENOTEXIST;
	}

	if (conn->cstate == st_listen) {
		/* Change connection to active */
		tcp_conn_sync(conn);
	}

	if (conn->snd_buf_fin) {
		fibril_mutex_unlock(&conn->lock);
		return TCP_ECLOSING;
	}

	while (size > 0) {
		buf_free = conn->snd_buf_size - conn->snd_buf_used;
		while (buf_free == 0 && !conn->reset) {
			log_msg(LOG_DEFAULT, LVL_DEBUG,
			    "%s: buf_free == 0, waiting.", conn->name);
			fibril_condvar_wait(&conn->snd_buf_cv, &conn->lock);
			buf_free = conn->snd_buf_size - conn->snd_buf_used;
		}

		if (conn->reset) {
			fibril_mutex_unlock(&conn->lock);
			return TCP_ERESET;
		}

		xfer_size = min(size, buf_free);

		/* Copy data to buffer */
		memcpy(conn->snd_buf + conn->snd_buf_used, data, xfer_size);
		data += xfer_size;
		conn->snd_buf_used += xfer_size;
		size -= xfer_size;

		tcp_tqueue_new_data(conn);
	}

	tcp_tqueue_new_data(conn);
	fibril_mutex_unlock(&conn->lock);
	return TCP_EOK;
}
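/*
 * Usage sketch (not part of the original source): tcp_uc_send() blocks
 * until all data fit into the send buffer, so the caller mainly needs to
 * check for reset or closing errors.
 */
static tcp_error_t example_send(tcp_conn_t *conn, void *data, size_t size)
{
	tcp_error_t rc = tcp_uc_send(conn, data, size, 0);
	if (rc == TCP_ERESET) {
		/* Peer reset the connection; no more data can be sent. */
	}

	return rc;
}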
static int rfb_handle_damage_pixels(visualizer_t *vs,
    sysarg_t x0, sysarg_t y0, sysarg_t width, sysarg_t height,
    sysarg_t x_offset, sysarg_t y_offset)
{
	fibril_mutex_lock(&rfb.lock);

	if (x0 + width > rfb.width || y0 + height > rfb.height) {
		fibril_mutex_unlock(&rfb.lock);
		return EINVAL;
	}

	/* TODO update surface_t and use it */
	if (!rfb.damage_valid) {
		rfb.damage_rect.x = x0;
		rfb.damage_rect.y = y0;
		rfb.damage_rect.width = width;
		rfb.damage_rect.height = height;
		rfb.damage_valid = true;
	} else {
		/* Grow the damage rectangle to cover the new area. */
		if (x0 < rfb.damage_rect.x) {
			rfb.damage_rect.width += rfb.damage_rect.x - x0;
			rfb.damage_rect.x = x0;
		}
		if (y0 < rfb.damage_rect.y) {
			rfb.damage_rect.height += rfb.damage_rect.y - y0;
			rfb.damage_rect.y = y0;
		}
		sysarg_t x1 = x0 + width;
		sysarg_t dx1 = rfb.damage_rect.x + rfb.damage_rect.width;
		if (x1 > dx1) {
			rfb.damage_rect.width += x1 - dx1;
		}
		sysarg_t y1 = y0 + height;
		sysarg_t dy1 = rfb.damage_rect.y + rfb.damage_rect.height;
		if (y1 > dy1) {
			rfb.damage_rect.height += y1 - dy1;
		}
	}

	pixelmap_t *map = &vs->cells;

	for (sysarg_t y = y0; y < height + y0; ++y) {
		for (sysarg_t x = x0; x < width + x0; ++x) {
			pixel_t pix = pixelmap_get_pixel(map,
			    (x + x_offset) % map->width,
			    (y + y_offset) % map->height);
			pixelmap_put_pixel(&rfb.framebuffer, x, y, pix);
		}
	}

	fibril_mutex_unlock(&rfb.lock);
	return EOK;
}
int loc_register_cat_change_cb(loc_cat_change_cb_t cb_fun)
{
	fibril_mutex_lock(&loc_callback_mutex);
	if (loc_callback_create() != EOK) {
		fibril_mutex_unlock(&loc_callback_mutex);
		return EIO;
	}

	cat_change_cb = cb_fun;
	fibril_mutex_unlock(&loc_callback_mutex);

	return EOK;
}
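/*
 * Registration sketch (not in the original source): a caller-side callback
 * with the no-argument signature implied by the dispatch in loc_cb_conn()
 * below; my_cat_change_cb is a hypothetical name.
 */
static void my_cat_change_cb(void)
{
	/* A device category changed; rescan services here. */
}

static int example_register(void)
{
	return loc_register_cat_change_cb(my_cat_change_cb);
}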
/** Initialize the table of open files. */
static bool vfs_files_init(vfs_client_data_t *vfs_data)
{
	fibril_mutex_lock(&vfs_data->lock);
	if (!vfs_data->files) {
		vfs_data->files = malloc(MAX_OPEN_FILES * sizeof(vfs_file_t *));
		if (!vfs_data->files) {
			fibril_mutex_unlock(&vfs_data->lock);
			return false;
		}
		memset(vfs_data->files, 0,
		    MAX_OPEN_FILES * sizeof(vfs_file_t *));
	}
	fibril_mutex_unlock(&vfs_data->lock);
	return true;
}
static void nic_driver_release_frame_list(nic_frame_list_t *frames)
{
	if (!frames)
		return;

	fibril_mutex_lock(&nic_globals.lock);
	if (nic_globals.frame_list_cache_size >= NIC_GLOBALS_MAX_CACHE_SIZE) {
		fibril_mutex_unlock(&nic_globals.lock);
		free(frames);
	} else {
		list_prepend(&frames->head, &nic_globals.frame_list_cache);
		nic_globals.frame_list_cache_size++;
		fibril_mutex_unlock(&nic_globals.lock);
	}
}
void vhc_virtdev_unplug(vhc_data_t *vhc, uintptr_t handle)
{
	vhc_virtdev_t *dev = (vhc_virtdev_t *) handle;

	// FIXME: check status
	(void) virthub_disconnect_device(&vhc->hub, dev);

	fibril_mutex_lock(&vhc->guard);
	fibril_mutex_lock(&dev->guard);
	dev->plugged = false;
	list_remove(&dev->link);
	fibril_mutex_unlock(&dev->guard);
	fibril_mutex_unlock(&vhc->guard);
}
/** Get a received message.
 *
 * Pull one message from the association's receive queue.
 */
int udp_assoc_recv(udp_assoc_t *assoc, udp_msg_t **msg, udp_sock_t *fsock)
{
	link_t *link;
	udp_rcv_queue_entry_t *rqe;

	log_msg(LVL_DEBUG, "udp_assoc_recv()");

	fibril_mutex_lock(&assoc->lock);
	while (list_empty(&assoc->rcv_queue)) {
		log_msg(LVL_DEBUG, "udp_assoc_recv() - waiting");
		fibril_condvar_wait(&assoc->rcv_queue_cv, &assoc->lock);
	}

	log_msg(LVL_DEBUG, "udp_assoc_recv() - got a message");
	link = list_first(&assoc->rcv_queue);
	rqe = list_get_instance(link, udp_rcv_queue_entry_t, link);
	list_remove(link);
	fibril_mutex_unlock(&assoc->lock);

	*msg = rqe->msg;
	*fsock = rqe->sp.foreign;
	free(rqe);

	return EOK;
}
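/*
 * Receive-side sketch (not in the original source). Ownership of *msg
 * passes to the caller; the udp_msg_delete() used to dispose of it is an
 * assumption about the surrounding UDP code, named here for illustration.
 */
static void example_recv_one(udp_assoc_t *assoc)
{
	udp_msg_t *msg;
	udp_sock_t fsock;

	if (udp_assoc_recv(assoc, &msg, &fsock) != EOK)
		return;

	/* ... process msg, which arrived from fsock ... */

	udp_msg_delete(msg);
}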
/** Set local socket in association.
 *
 * @param assoc Association
 * @param lsock Local socket (deeply copied)
 */
void udp_assoc_set_local(udp_assoc_t *assoc, udp_sock_t *lsock)
{
	log_msg(LVL_DEBUG, "udp_assoc_set_local(%p, %p)", assoc, lsock);
	fibril_mutex_lock(&assoc->lock);
	assoc->ident.local = *lsock;
	fibril_mutex_unlock(&assoc->lock);
}
/** Set foreign socket in association.
 *
 * @param assoc Association
 * @param fsock Foreign socket (deeply copied)
 */
void udp_assoc_set_foreign(udp_assoc_t *assoc, udp_sock_t *fsock)
{
	log_msg(LVL_DEBUG, "udp_assoc_set_foreign(%p, %p)", assoc, fsock);
	fibril_mutex_lock(&assoc->lock);
	assoc->ident.foreign = *fsock;
	fibril_mutex_unlock(&assoc->lock);
}
/** Enlist association.
 *
 * Add association to the association map.
 */
void udp_assoc_add(udp_assoc_t *assoc)
{
	udp_assoc_addref(assoc);
	fibril_mutex_lock(&assoc_list_lock);
	list_append(&assoc->link, &assoc_list);
	fibril_mutex_unlock(&assoc_list_lock);
}
static void ping_signal_done(void)
{
	fibril_mutex_lock(&done_lock);
	done = true;
	fibril_mutex_unlock(&done_lock);
	fibril_condvar_broadcast(&done_cv);
}
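/*
 * The matching wait side (a sketch, not in the original source): the
 * classic flag-plus-condvar pattern using only the names shown above.
 */
static void example_wait_done(void)
{
	fibril_mutex_lock(&done_lock);
	while (!done)
		fibril_condvar_wait(&done_cv, &done_lock);
	fibril_mutex_unlock(&done_lock);
}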
static int vhc_virtdev_plug_generic(vhc_data_t *vhc,
    async_sess_t *sess, usbvirt_device_t *virtdev,
    uintptr_t *handle, bool connect, usb_address_t address)
{
	vhc_virtdev_t *dev = vhc_virtdev_create();
	if (dev == NULL)
		return ENOMEM;

	dev->dev_sess = sess;
	dev->dev_local = virtdev;
	dev->address = address;

	fibril_mutex_lock(&vhc->guard);
	list_append(&dev->link, &vhc->devices);
	fibril_mutex_unlock(&vhc->guard);

	fid_t fibril = fibril_create(vhc_transfer_queue_processor, dev);
	if (fibril == 0) {
		/* Unlink the device again before freeing it. */
		fibril_mutex_lock(&vhc->guard);
		list_remove(&dev->link);
		fibril_mutex_unlock(&vhc->guard);
		free(dev);
		return ENOMEM;
	}
	fibril_add_ready(fibril);

	if (handle != NULL)
		*handle = (uintptr_t) dev;

	if (connect) {
		// FIXME: check status
		(void) virthub_connect_device(&vhc->hub, dev);
	}

	return EOK;
}
/**
 * Process root hub request.
 *
 * @param instance Root hub instance
 * @param request Structure containing both request and response information
 */
void rh_request(rh_t *instance, usb_transfer_batch_t *request)
{
	assert(instance);
	assert(request);

	switch (request->ep->transfer_type) {
	case USB_TRANSFER_CONTROL:
		usb_log_debug("Root hub got CONTROL packet\n");
		control_request(instance, request);
		break;
	case USB_TRANSFER_INTERRUPT:
		usb_log_debug("Root hub got INTERRUPT packet\n");
		fibril_mutex_lock(&instance->guard);
		assert(instance->unfinished_interrupt_transfer == NULL);
		const uint16_t mask = create_interrupt_mask(instance);
		if (mask == 0) {
			usb_log_debug("No changes (%hx)...\n", mask);
			instance->unfinished_interrupt_transfer = request;
		} else {
			usb_log_debug("Processing changes...\n");
			interrupt_request(request, mask,
			    instance->interrupt_mask_size);
		}
		fibril_mutex_unlock(&instance->guard);
		break;
	default:
		usb_log_error("Root hub got unsupported request.\n");
		TRANSFER_END(request, ENOTSUP);
	}
}
static void emit_event(const isdv4_event_t *event)
{
	fibril_mutex_lock(&client_mutex);
	async_sess_t *sess = client_sess;
	fibril_mutex_unlock(&client_mutex);

	if (!sess)
		return;

	async_exch_t *exch = async_exchange_begin(sess);
	if (exch) {
		unsigned int max_x = state.stylus_max_x;
		unsigned int max_y = state.stylus_max_y;
		if (event->source == TOUCH) {
			max_x = state.touch_max_x;
			max_y = state.touch_max_y;
		}
		async_msg_4(exch, MOUSEEV_ABS_MOVE_EVENT, event->x, event->y,
		    max_x, max_y);
		if (event->type == PRESS || event->type == RELEASE) {
			async_msg_2(exch, MOUSEEV_BUTTON_EVENT, event->button,
			    event->type == PRESS);
		}
	}
	async_exchange_end(exch);
}
static void loc_cb_conn(ipc_callid_t iid, ipc_call_t *icall, void *arg)
{
	while (true) {
		ipc_call_t call;
		ipc_callid_t callid = async_get_call(&call);

		if (!IPC_GET_IMETHOD(call)) {
			/* TODO: Handle hangup */
			return;
		}

		switch (IPC_GET_IMETHOD(call)) {
		case LOC_EVENT_CAT_CHANGE:
			fibril_mutex_lock(&loc_callback_mutex);
			loc_cat_change_cb_t cb_fun = cat_change_cb;
			fibril_mutex_unlock(&loc_callback_mutex);

			async_answer_0(callid, EOK);

			if (cb_fun != NULL)
				(*cb_fun)();
			break;
		default:
			async_answer_0(callid, ENOTSUP);
		}
	}
}
/**
 * Callback called from the hub polling fibril when the fibril terminates.
 *
 * Does not perform cleanup, just marks the hub as not running.
 *
 * @param device USB device affected
 * @param was_error indicates that the fibril was stopped due to an error
 * @param data pointer to usb_hub_dev_t structure
 */
static void usb_hub_polling_terminated_callback(usb_device_t *device,
    bool was_error, void *data)
{
	usb_hub_dev_t *hub = data;
	assert(hub);

	fibril_mutex_lock(&hub->pending_ops_mutex);

	/*
	 * The device is dead. However, there might be some pending
	 * operations that we need to wait for.
	 * One of them is device adding in progress.
	 * The respective fibril is probably waiting for status change
	 * in port reset (port enable) callback.
	 * Such a change would never come (otherwise we would not be here).
	 * Thus, we fail all pending port resets.
	 */
	if (hub->pending_ops_count > 0) {
		for (size_t port = 0; port < hub->port_count; ++port) {
			usb_hub_port_reset_fail(&hub->ports[port]);
		}
	}

	/* And now wait for them. */
	while (hub->pending_ops_count > 0) {
		fibril_condvar_wait(&hub->pending_ops_cv,
		    &hub->pending_ops_mutex);
	}
	fibril_mutex_unlock(&hub->pending_ops_mutex);

	hub->running = false;
}
/** Delist association.
 *
 * Remove association from the association map.
 */
void udp_assoc_remove(udp_assoc_t *assoc)
{
	fibril_mutex_lock(&assoc_list_lock);
	list_remove(&assoc->link);
	fibril_mutex_unlock(&assoc_list_lock);
	udp_assoc_delref(assoc);
}
static void cuda_irq_handler(ipc_callid_t iid, ipc_call_t *call)
{
	uint8_t rbuf[CUDA_RCV_BUF_SIZE];
	size_t len;
	bool handle;

	handle = false;
	len = 0;

	fibril_mutex_lock(&instance->dev_lock);

	/* Lower IFR.SR_INT so that CUDA can generate next int by raising it. */
	pio_write_8(&instance->cuda->ifr, SR_INT);

	switch (instance->xstate) {
	case cx_listen:
		cuda_irq_listen();
		break;
	case cx_receive:
		cuda_irq_receive();
		break;
	case cx_rcv_end:
		cuda_irq_rcv_end(rbuf, &len);
		handle = true;
		break;
	case cx_send_start:
		cuda_irq_send_start();
		break;
	case cx_send:
		cuda_irq_send();
		break;
	}

	fibril_mutex_unlock(&instance->dev_lock);

	/* Handle an incoming packet. */
	if (handle)
		cuda_packet_handle(rbuf, len);
}
log_level_t get_default_logging_level(void)
{
	fibril_mutex_lock(&default_logging_level_guard);
	log_level_t result = default_logging_level;
	fibril_mutex_unlock(&default_logging_level_guard);
	return result;
}
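/*
 * Hypothetical setter (not in the original source) following the same
 * guard discipline; shown only to illustrate the pattern around the
 * shared default_logging_level variable.
 */
static void example_set_default_logging_level(log_level_t level)
{
	fibril_mutex_lock(&default_logging_level_guard);
	default_logging_level = level;
	fibril_mutex_unlock(&default_logging_level_guard);
}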
int inet_addrobj_add(inet_addrobj_t *addr)
{
	inet_addrobj_t *aobj;

	fibril_mutex_lock(&addr_list_lock);
	aobj = inet_addrobj_find_by_name_locked(addr->name, addr->ilink);
	if (aobj != NULL) {
		/* Duplicate address name */
		fibril_mutex_unlock(&addr_list_lock);
		return EEXISTS;
	}

	list_append(&addr->addr_list, &addr_list);
	fibril_mutex_unlock(&addr_list_lock);

	return EOK;
}
/** Destroy timer.
 *
 * @param timer Timer, must not be active or accessed by other threads.
 */
void fibril_timer_destroy(fibril_timer_t *timer)
{
	fibril_mutex_lock(&timer->lock);
	assert(timer->state != fts_active);
	timer->state = fts_cleanup;

	/* Wake up the timer fibril, which exits upon seeing fts_cleanup. */
	fibril_condvar_broadcast(&timer->cv);
	fibril_mutex_unlock(&timer->lock);
}
/** Delist connection.
 *
 * Remove connection from the connection map.
 */
void tcp_conn_remove(tcp_conn_t *conn)
{
	fibril_mutex_lock(&conn_list_lock);
	amap_remove(amap, &conn->ident);
	list_remove(&conn->link);
	fibril_mutex_unlock(&conn_list_lock);
	tcp_conn_delref(conn);
}