/**
 * gb_operation_request_send() - send an operation request message
 * @operation: the operation to initiate
 * @callback: the operation completion callback
 * @gfp: the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
			      gb_operation_callback callback,
			      gfp_t gfp)
{
	struct gb_connection *conn = operation->connection;
	struct gb_operation_msg_hdr *hdr;
	unsigned int seq;
	int status;

	/* A completion callback is mandatory for all requests. */
	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id and store it in the request header.
	 * Id zero is reserved to mark unidirectional operations; cycled
	 * ids therefore fall in the range 1..U16_MAX.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		seq = (unsigned int)atomic_inc_return(&conn->op_cycle);
		operation->id = (u16)(seq % U16_MAX + 1);
	}

	hdr = operation->request->header;
	hdr->operation_id = cpu_to_le16(operation->id);

	/* Mark the operation as in flight before handing it off. */
	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);

	status = gb_operation_get_active(operation);
	if (status)
		goto err_put;

	status = gb_message_send(operation->request, gfp);
	if (status)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return status;
}
/* * Send a response for an incoming operation request. A non-zero * errno indicates a failed operation. * * If there is any response payload, the incoming request handler is * responsible for allocating the response message. Otherwise the * it can simply supply the result errno; this function will * allocate the response message if necessary. */ static int gb_operation_response_send(struct gb_operation *operation, int errno) { struct gb_connection *connection = operation->connection; int ret; if (!operation->response && !gb_operation_is_unidirectional(operation)) { if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL)) return -ENOMEM; } /* Record the result */ if (!gb_operation_result_set(operation, errno)) { dev_err(&connection->hd->dev, "request result already set\n"); return -EIO; /* Shouldn't happen */ } /* Sender of request does not care about response. */ if (gb_operation_is_unidirectional(operation)) return 0; /* Reference will be dropped when message has been sent. */ gb_operation_get(operation); ret = gb_operation_get_active(operation); if (ret) goto err_put; /* Fill in the response header and send it */ operation->response->header->result = gb_operation_errno_map(errno); ret = gb_message_send(operation->response, GFP_KERNEL); if (ret) goto err_put_active; return 0; err_put_active: gb_operation_put_active(operation); err_put: gb_operation_put(operation); return ret; }
/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection,
			   u16 operation_id)
{
	struct gb_operation *operation;
	struct gb_operation *match = NULL;
	unsigned long flags;

	/*
	 * The operations list is walked under the connection lock; take a
	 * reference before releasing it so the match cannot go away.
	 */
	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links) {
		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			match = operation;
			break;
		}
	}
	spin_unlock_irqrestore(&connection->lock, flags);

	return match;
}
/*
 * Cancel all active operations on a connection.
 *
 * Should only be called during connection tear down.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
{
	struct gb_operation *operation;

	spin_lock_irq(&connection->lock);
	/*
	 * Drain the list one entry at a time rather than iterating it:
	 * cancelling an operation ultimately removes it from the list, and
	 * we must drop the lock around each cancel, so the list may change
	 * between iterations.
	 */
	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		/*
		 * Hold a reference across the unlocked region so the
		 * operation cannot be freed while we're cancelling it.
		 */
		gb_operation_get(operation);
		/* The cancel paths can sleep; never call them under the lock. */
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		/* Reacquire the lock and re-examine the (updated) list. */
		spin_lock_irq(&connection->lock);
	}
	spin_unlock_irq(&connection->lock);
}