static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		if (l2cap_pi(sk)->amp_id) {
			/* Physical link must be brought up before connection
			 * completes.
			 */
			amp_accept_physical(conn, l2cap_pi(sk)->amp_id, sk);
			release_sock(sk);
			return 0;
		}

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
	else
		err = bt_sock_recvmsg(iocb, sock, msg, len, flags);

	if (err >= 0)
		l2cap_ertm_recv_done(sk);

	return err;
}
static inline int l2cap_config_req(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd, __u8 *data)
{
	l2cap_conf_req *req = (l2cap_conf_req *) data;
	__u16 dcid, flags;
	__u8 rsp[64];
	struct sock *sk;
	int result;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
		return -ENOENT;

	l2cap_parse_conf_req(sk, req->data, cmd->len - L2CAP_CONF_REQ_SIZE);

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp, NULL), rsp);
		goto unlock;
	}

	/* Complete config. */
	l2cap_send_rsp(conn, cmd->ident, L2CAP_CONF_RSP,
			l2cap_build_conf_rsp(sk, rsp, &result), rsp);

	if (result)
		goto unlock;

	/* Output config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		sk->state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	} else if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		char req[64];
		l2cap_send_req(conn, L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
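The option list that l2cap_parse_conf_req() consumes above is not shown in this listing. As a rough, hedged illustration of what such a parser has to do, the standalone sketch below walks the type/length/value options that follow the dcid/flags header of a Configure Request, as laid out in the L2CAP specification (option type with a high "hint" bit, a one-byte length, then the value; MTU is option type 0x01). The function name parse_conf_options() and its interface are hypothetical and are not the kernel's l2cap_parse_conf_req().

/* Hedged sketch: walk L2CAP configuration options (type, length, value).
 * parse_conf_options() is a hypothetical helper, not kernel code. */
#include <stdint.h>
#include <stddef.h>

#define CONF_OPT_MTU	0x01	/* MTU option, 2-byte little-endian value */

static int parse_conf_options(const uint8_t *opts, size_t len, uint16_t *mtu)
{
	size_t off = 0;

	while (off + 2 <= len) {
		uint8_t type   = opts[off] & 0x7f;	/* top bit marks a "hint" option */
		uint8_t optlen = opts[off + 1];

		if (off + 2 + optlen > len)
			return -1;			/* truncated option list */

		if (type == CONF_OPT_MTU && optlen == 2)
			*mtu = (uint16_t)(opts[off + 2] | (opts[off + 3] << 8));

		off += 2 + optlen;
	}
	return 0;
}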
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;
		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
		u8 buf[128];

		sk->sk_state = BT_CONFIG;

		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
			release_sock(sk);
			return 0;
		}

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;

		release_sock(sk);
		return 0;
	}

	release_sock(sk);

	if (sock->type == SOCK_STREAM)
		return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
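Both copies of l2cap_sock_recvmsg above implement the deferred-setup path: with BT_DEFER_SETUP enabled on the listening socket, an incoming channel is held in BT_CONNECT2 after accept(), and the first read on the accepted socket is what sends the Connect Response and kicks off configuration. A minimal userspace sketch of that flow follows; it assumes the BlueZ <bluetooth/...> headers, uses an arbitrary example PSM (0x1001), and omits error handling.

/* Hedged sketch: accept an L2CAP connection with deferred setup. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

int main(void)
{
	struct sockaddr_l2 addr;
	int opt = 1;
	char c;

	int srv = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);

	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	addr.l2_psm    = htobs(0x1001);		/* example dynamic PSM */
	bacpy(&addr.l2_bdaddr, BDADDR_ANY);
	bind(srv, (struct sockaddr *)&addr, sizeof(addr));

	/* Ask the kernel to defer the L2CAP response until we read. */
	setsockopt(srv, SOL_BLUETOOTH, BT_DEFER_SETUP, &opt, sizeof(opt));
	listen(srv, 1);

	int cli = accept(srv, NULL, NULL);	/* channel still in BT_CONNECT2 */

	/* The first recv() on the accepted socket drives the
	 * Connect Response / Configure Request path shown above. */
	recv(cli, &c, 1, 0);

	close(cli);
	close(srv);
	return 0;
}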
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd, __u8 *data)
{
	l2cap_conn_rsp *rsp = (l2cap_conn_rsp *) data;
	__u16 scid, dcid, result, status;
	struct sock *sk;
	char req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
			dcid, scid, result, status);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return -ENOENT;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->state = BT_CONFIG;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_req(conn, L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, req), req);
		break;

	case L2CAP_CR_PEND:
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
static inline int l2cap_config_rsp(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd, __u8 *data)
{
	l2cap_conf_rsp *rsp = (l2cap_conf_rsp *) data;
	__u16 scid, flags, result;
	struct sock *sk;
	int err = 0;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);

	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
		return -ENOENT;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		break;

	case L2CAP_CONF_UNACCEPT:
		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
			char req[128];

			/* It does not make sense to adjust L2CAP parameters
			   that are currently defined in the spec. We simply
			   resend config request that we sent earlier. It is
			   stupid :) but it helps qualification testing
			   which expects at least some response from us. */
			l2cap_send_req(conn, L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
			goto done;
		}

	default:
		sk->state = BT_DISCONN;
		sk->err   = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		{
			l2cap_disconn_req req;
			req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
			req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
			l2cap_send_req(conn, L2CAP_DISCONN_REQ,
					L2CAP_DISCONN_REQ_SIZE, &req);
		}
		goto done;
	}

	if (flags & 0x01)
		goto done;

	/* Input config done */
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->state = BT_CONNECTED;
		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return err;
}