/*
 * cache the security permit granted for this vnode against the key used, so
 * that later permission checks can be answered without a server round trip
 */
void afs_cache_permit(struct afs_vnode *vnode, struct key *key, long acl_order)
{
	struct afs_permits *permits, *xpermits;
	struct afs_permit *permit;
	struct afs_vnode *auth_vnode;
	int count, loop;

	_enter("{%x:%u},%x,%lx",
	       vnode->fid.vid, vnode->fid.vnode, key_serial(key), acl_order);

	auth_vnode = afs_get_auth_inode(vnode, key);
	if (IS_ERR(auth_vnode)) {
		_leave(" [get error %ld]", PTR_ERR(auth_vnode));
		return;
	}

	mutex_lock(&auth_vnode->permits_lock);

	/* guard against the vnode having been renamed into a different parent
	 * directory whilst we waited for the lock */
	if (memcmp(&auth_vnode->fid, &vnode->status.parent,
		   sizeof(struct afs_fid)) != 0) {
		_debug("renamed");
		goto out_unlock;
	}

	/* discard the permit if the ACL order has moved on since the status
	 * we're trying to cache was fetched */
	if (auth_vnode->acl_order - acl_order > 0) {
		_debug("ACL changed?");
		goto out_unlock;
	}

	/* always update the anonymous access mask; anonymous access itself
	 * doesn't need a permit record */
	_debug("anon access %x", vnode->status.anon_access);
	auth_vnode->status.anon_access = vnode->status.anon_access;
	if (key == vnode->volume->cell->anonymous_key)
		goto out_unlock;

	xpermits = auth_vnode->permits;
	count = 0;
	if (xpermits) {
		/* see if a permit for this key is already in the list - if so
		 * just update its access mask in place */
		count = xpermits->count;
		permit = xpermits->permits;
		for (loop = count; loop > 0; loop--) {
			if (permit->key == key) {
				permit->access_mask =
					vnode->status.caller_access;
				goto out_unlock;
			}
			permit++;
		}
	}

	permits = kmalloc(sizeof(*permits) + sizeof(*permit) * (count + 1),
			  GFP_NOFS);
	if (!permits)
		goto out_unlock;

	if (xpermits)
		memcpy(permits->permits, xpermits->permits,
		       count * sizeof(struct afs_permit));

	_debug("key %x access %x",
	       key_serial(key), vnode->status.caller_access);
	permits->permits[count].access_mask = vnode->status.caller_access;
	permits->permits[count].key = key_get(key);
	permits->count = count + 1;

	rcu_assign_pointer(auth_vnode->permits, permits);
	if (xpermits)
		call_rcu(&xpermits->rcu, afs_dispose_of_permits);

out_unlock:
	mutex_unlock(&auth_vnode->permits_lock);
	iput(&auth_vnode->vfs_inode);
	_leave("");
}
/* * initiate a call */ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, const struct afs_wait_mode *wait_mode) { struct sockaddr_rxrpc srx; struct rxrpc_call *rxcall; struct msghdr msg; struct kvec iov[1]; int ret; struct sk_buff *skb; _enter("%x,{%d},", addr->s_addr, ntohs(call->port)); ASSERT(call->type != NULL); ASSERT(call->type->name != NULL); _debug("____MAKE %p{%s,%x} [%d]____", call, call->type->name, key_serial(call->key), atomic_read(&afs_outstanding_calls)); call->wait_mode = wait_mode; call->async_workfn = afs_process_async_call; INIT_WORK(&call->async_work, afs_async_workfn); memset(&srx, 0, sizeof(srx)); srx.srx_family = AF_RXRPC; srx.srx_service = call->service_id; srx.transport_type = SOCK_DGRAM; srx.transport_len = sizeof(srx.transport.sin); srx.transport.sin.sin_family = AF_INET; srx.transport.sin.sin_port = call->port; memcpy(&srx.transport.sin.sin_addr, addr, 4); /* create a call */ rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key, (unsigned long) call, gfp); call->key = NULL; if (IS_ERR(rxcall)) { ret = PTR_ERR(rxcall); goto error_kill_call; } call->rxcall = rxcall; /* send the request */ iov[0].iov_base = call->request; iov[0].iov_len = call->request_size; msg.msg_name = NULL; msg.msg_namelen = 0; iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, call->request_size); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = (call->send_pages ? MSG_MORE : 0); /* have to change the state *before* sending the last packet as RxRPC * might give us the reply before it returns from sending the * request */ if (!call->send_pages) call->state = AFS_CALL_AWAIT_REPLY; ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size); if (ret < 0) goto error_do_abort; if (call->send_pages) { ret = afs_send_pages(call, &msg, iov); if (ret < 0) goto error_do_abort; } /* at this point, an async call may no longer exist as it may have * already completed */ return wait_mode->wait(call); error_do_abort: rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT); while ((skb = skb_dequeue(&call->rx_queue))) afs_free_skb(skb); error_kill_call: afs_end_call(call); _leave(" = %d", ret); return ret; }
/* * read page from file, directory or symlink, given a key to use */ int afs_page_filler(void *data, struct page *page) { struct inode *inode = page->mapping->host; struct afs_vnode *vnode = AFS_FS_I(inode); struct key *key = data; size_t len; off_t offset; int ret; _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index); BUG_ON(!PageLocked(page)); ret = -ESTALE; if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) goto error; /* is it cached? */ #ifdef CONFIG_AFS_FSCACHE ret = fscache_read_or_alloc_page(vnode->cache, page, afs_file_readpage_read_complete, NULL, GFP_KERNEL); #else ret = -ENOBUFS; #endif switch (ret) { /* read BIO submitted (page in cache) */ case 0: break; /* page not yet cached */ case -ENODATA: _debug("cache said ENODATA"); goto go_on; /* page will not be cached */ case -ENOBUFS: _debug("cache said ENOBUFS"); default: go_on: offset = page->index << PAGE_CACHE_SHIFT; len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE); /* read the contents of the file from the server into the * page */ ret = afs_vnode_fetch_data(vnode, key, offset, len, page); if (ret < 0) { if (ret == -ENOENT) { _debug("got NOENT from server" " - marking file deleted and stale"); set_bit(AFS_VNODE_DELETED, &vnode->flags); ret = -ESTALE; } #ifdef CONFIG_AFS_FSCACHE fscache_uncache_page(vnode->cache, page); #endif BUG_ON(PageFsCache(page)); goto error; } SetPageUptodate(page); /* send the page to the cache */ #ifdef CONFIG_AFS_FSCACHE if (PageFsCache(page) && fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) { fscache_uncache_page(vnode->cache, page); BUG_ON(PageFsCache(page)); } #endif unlock_page(page); } _leave(" = 0"); return 0; error: SetPageError(page); unlock_page(page); _leave(" = %d", ret); return ret; }
/* * extract control messages from the sendmsg() control buffer */ static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg, unsigned long *user_call_ID, enum rxrpc_command *command, u32 *abort_code, bool server) { struct cmsghdr *cmsg; int len; *command = RXRPC_CMD_SEND_DATA; if (msg->msg_controllen == 0) return -EINVAL; for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); _debug("CMSG %d, %d, %d", cmsg->cmsg_level, cmsg->cmsg_type, len); if (cmsg->cmsg_level != SOL_RXRPC) continue; switch (cmsg->cmsg_type) { case RXRPC_USER_CALL_ID: if (msg->msg_flags & MSG_CMSG_COMPAT) { if (len != sizeof(u32)) return -EINVAL; *user_call_ID = *(u32 *) CMSG_DATA(cmsg); } else { if (len != sizeof(unsigned long)) return -EINVAL; *user_call_ID = *(unsigned long *) CMSG_DATA(cmsg); } _debug("User Call ID %lx", *user_call_ID); break; case RXRPC_ABORT: if (*command != RXRPC_CMD_SEND_DATA) return -EINVAL; *command = RXRPC_CMD_SEND_ABORT; if (len != sizeof(*abort_code)) return -EINVAL; *abort_code = *(unsigned int *) CMSG_DATA(cmsg); _debug("Abort %x", *abort_code); if (*abort_code == 0) return -EINVAL; break; case RXRPC_ACCEPT: if (*command != RXRPC_CMD_SEND_DATA) return -EINVAL; *command = RXRPC_CMD_ACCEPT; if (len != 0) return -EINVAL; if (!server) return -EISCONN; break; default: return -EINVAL; } } _leave(" = 0"); return 0; }
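/*
 * Illustrative userspace sketch (not part of the kernel source): roughly how
 * a sender might build the control buffer that rxrpc_sendmsg_cmsg() above
 * parses, for the RXRPC_USER_CALL_ID case only.  The helper name is made up;
 * the buffer is assumed to be at least CMSG_SPACE(sizeof(unsigned long))
 * bytes, and RXRPC_USER_CALL_ID is assumed to come from <linux/rxrpc.h>.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272		/* value used by the kernel's socket.h */
#endif

static void rxrpc_example_set_call_id(struct msghdr *msg, void *buf,
				      size_t buflen, unsigned long call_id)
{
	struct cmsghdr *cmsg;

	/* attach the caller-supplied buffer as ancillary data space */
	memset(buf, 0, buflen);
	msg->msg_control = buf;
	msg->msg_controllen = buflen;

	/* one control message: level SOL_RXRPC, type RXRPC_USER_CALL_ID,
	 * carrying the unsigned long tag by which the call will be known */
	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	/* report only the ancillary space actually consumed */
	msg->msg_controllen = CMSG_SPACE(sizeof(call_id));
}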
/* * parse an RxKAD type XDR format token * - the caller guarantees we have at least 4 words */ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, unsigned toklen) { struct rxrpc_key_token *token, **pptoken; size_t plen; u32 tktlen; int ret; _enter(",{%x,%x,%x,%x},%u", ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), ntohl(xdr[3]), toklen); if (toklen <= 8 * 4) return -EKEYREJECTED; tktlen = ntohl(xdr[7]); _debug("tktlen: %x", tktlen); if (tktlen > AFSTOKEN_RK_TIX_MAX) return -EKEYREJECTED; if (8 * 4 + tktlen != toklen) return -EKEYREJECTED; plen = sizeof(*token) + sizeof(*token->kad) + tktlen; ret = key_payload_reserve(key, key->datalen + plen); if (ret < 0) return ret; plen -= sizeof(*token); token = kmalloc(sizeof(*token), GFP_KERNEL); if (!token) return -ENOMEM; token->kad = kmalloc(plen, GFP_KERNEL); if (!token->kad) { kfree(token); return -ENOMEM; } token->security_index = RXRPC_SECURITY_RXKAD; token->kad->ticket_len = tktlen; token->kad->vice_id = ntohl(xdr[0]); token->kad->kvno = ntohl(xdr[1]); token->kad->start = ntohl(xdr[4]); token->kad->expiry = ntohl(xdr[5]); token->kad->primary_flag = ntohl(xdr[6]); memcpy(&token->kad->session_key, &xdr[2], 8); memcpy(&token->kad->ticket, &xdr[8], tktlen); _debug("SCIX: %u", token->security_index); _debug("TLEN: %u", token->kad->ticket_len); _debug("EXPY: %x", token->kad->expiry); _debug("KVNO: %u", token->kad->kvno); _debug("PRIM: %u", token->kad->primary_flag); _debug("SKEY: %02x%02x%02x%02x%02x%02x%02x%02x", token->kad->session_key[0], token->kad->session_key[1], token->kad->session_key[2], token->kad->session_key[3], token->kad->session_key[4], token->kad->session_key[5], token->kad->session_key[6], token->kad->session_key[7]); if (token->kad->ticket_len >= 8) _debug("TCKT: %02x%02x%02x%02x%02x%02x%02x%02x", token->kad->ticket[0], token->kad->ticket[1], token->kad->ticket[2], token->kad->ticket[3], token->kad->ticket[4], token->kad->ticket[5], token->kad->ticket[6], token->kad->ticket[7]); /* count the number of tokens attached */ key->type_data.x[0]++; /* attach the data */ for (pptoken = (struct rxrpc_key_token **)&key->payload.data; *pptoken; pptoken = &(*pptoken)->next) continue; *pptoken = token; if (token->kad->expiry < key->expiry) key->expiry = token->kad->expiry; _leave(" = 0"); return 0; }
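/*
 * Hypothetical view of the rxkad token wire format, for illustration only
 * (the function above indexes raw __be32 words rather than using a struct
 * like this); the layout is reconstructed from the field accesses:
 */
struct rxkad_xdr_token_example {
	__be32	vice_id;	/* xdr[0] */
	__be32	kvno;		/* xdr[1] */
	__u8	session_key[8];	/* xdr[2..3] */
	__be32	start;		/* xdr[4] */
	__be32	expiry;		/* xdr[5] */
	__be32	primary_flag;	/* xdr[6] */
	__be32	ticket_len;	/* xdr[7], at most AFSTOKEN_RK_TIX_MAX */
	__u8	ticket[];	/* xdr[8...]; toklen must equal
				 * 8 * 4 + ticket_len */
};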
/* * generate a connection-level abort */ static int rxrpc_abort_connection(struct rxrpc_connection *conn, u32 error, u32 abort_code) { struct rxrpc_header hdr; struct msghdr msg; struct kvec iov[2]; __be32 word; size_t len; int ret; _enter("%d,,%u,%u", conn->debug_id, error, abort_code); /* generate a connection-level abort */ spin_lock_bh(&conn->state_lock); if (conn->state < RXRPC_CONN_REMOTELY_ABORTED) { conn->state = RXRPC_CONN_LOCALLY_ABORTED; conn->error = error; spin_unlock_bh(&conn->state_lock); } else { spin_unlock_bh(&conn->state_lock); _leave(" = 0 [already dead]"); return 0; } rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code); msg.msg_name = &conn->trans->peer->srx.transport.sin; msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; hdr.epoch = conn->epoch; hdr.cid = conn->cid; hdr.callNumber = 0; hdr.seq = 0; hdr.type = RXRPC_PACKET_TYPE_ABORT; hdr.flags = conn->out_clientflag; hdr.userStatus = 0; hdr.securityIndex = conn->security_ix; hdr._rsvd = 0; hdr.serviceId = conn->service_id; word = htonl(abort_code); iov[0].iov_base = &hdr; iov[0].iov_len = sizeof(hdr); iov[1].iov_base = &word; iov[1].iov_len = sizeof(word); len = iov[0].iov_len + iov[1].iov_len; hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code); ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); if (ret < 0) { _debug("sendmsg failed: %d", ret); return -EAGAIN; } _leave(" = 0"); return 0; }
/* * inode retrieval */ struct inode *afs_iget(struct super_block *sb, struct key *key, struct afs_fid *fid, struct afs_file_status *status, struct afs_callback *cb) { struct afs_iget_data data = { .fid = *fid }; struct afs_super_info *as; struct afs_vnode *vnode; struct inode *inode; int ret; _enter(",{%x:%u.%u},,", fid->vid, fid->vnode, fid->unique); as = sb->s_fs_info; data.volume = as->volume; inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set, &data); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } _debug("GOT INODE %p { vl=%x vn=%x, u=%x }", inode, fid->vid, fid->vnode, fid->unique); vnode = AFS_FS_I(inode); /* deal with an existing inode */ if (!(inode->i_state & I_NEW)) { _leave(" = %p", inode); return inode; } if (!status) { /* it's a remotely extant inode */ set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); ret = afs_vnode_fetch_status(vnode, NULL, key); if (ret < 0) goto bad_inode; } else { /* it's an inode we just created */ memcpy(&vnode->status, status, sizeof(vnode->status)); if (!cb) { /* it's a symlink we just created (the fileserver * didn't give us a callback) */ vnode->cb_version = 0; vnode->cb_expiry = 0; vnode->cb_type = 0; vnode->cb_expires = get_seconds(); } else { vnode->cb_version = cb->version; vnode->cb_expiry = cb->expiry; vnode->cb_type = cb->type; vnode->cb_expires = vnode->cb_expiry + get_seconds(); } } /* set up caching before mapping the status, as map-status reads the * first page of symlinks to see if they're really mountpoints */ inode->i_size = vnode->status.size; #ifdef CONFIG_AFS_FSCACHE vnode->cache = fscache_acquire_cookie(vnode->volume->cache, &afs_vnode_cache_index_def, vnode); #endif ret = afs_inode_map_status(vnode, key); if (ret < 0) goto bad_inode; /* success */ clear_bit(AFS_VNODE_UNSET, &vnode->flags); inode->i_flags |= S_NOATIME; unlock_new_inode(inode); _leave(" = %p [CB { v=%u t=%u }]", inode, vnode->cb_version, vnode->cb_type); return inode; /* failure */ bad_inode: #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_cookie(vnode->cache, 0); vnode->cache = NULL; #endif iget_failed(inode); _leave(" = %d [bad]", ret); return ERR_PTR(ret); }
/* * send a message through an RxRPC socket * - in a client this does a number of things: * - finds/sets up a connection for the security specified (if any) * - initiates a call (ID in control data) * - ends the request phase of a call (if MSG_MORE is not set) * - sends a call data packet * - may send an abort (abort code in control data) */ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len) { struct rxrpc_local *local; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); int ret; _enter(",{%d},,%zu", rx->sk.sk_state, len); if (m->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (m->msg_name) { ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen); if (ret < 0) { _leave(" = %d [bad addr]", ret); return ret; } } lock_sock(&rx->sk); switch (rx->sk.sk_state) { case RXRPC_UNBOUND: rx->srx.srx_family = AF_RXRPC; rx->srx.srx_service = 0; rx->srx.transport_type = SOCK_DGRAM; rx->srx.transport.family = rx->family; switch (rx->family) { case AF_INET: rx->srx.transport_len = sizeof(struct sockaddr_in); break; #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: rx->srx.transport_len = sizeof(struct sockaddr_in6); break; #endif default: ret = -EAFNOSUPPORT; goto error_unlock; } local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx); if (IS_ERR(local)) { ret = PTR_ERR(local); goto error_unlock; } rx->local = local; rx->sk.sk_state = RXRPC_CLIENT_UNBOUND; /* Fall through */ case RXRPC_CLIENT_UNBOUND: case RXRPC_CLIENT_BOUND: if (!m->msg_name && test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) { m->msg_name = &rx->connect_srx; m->msg_namelen = sizeof(rx->connect_srx); } /* Fall through */ case RXRPC_SERVER_BOUND: case RXRPC_SERVER_LISTENING: ret = rxrpc_do_sendmsg(rx, m, len); /* The socket has been unlocked */ goto out; default: ret = -EINVAL; goto error_unlock; } error_unlock: release_sock(&rx->sk); out: _leave(" = %d", ret); return ret; }
/*
 * see if we have space for a number of pages and/or a number of files in the
 * cache
 */
int cachefiles_has_space(struct cachefiles_cache *cache,
			 unsigned fnr, unsigned bnr)
{
	struct kstatfs stats;
	int ret;

	//_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
	//       (unsigned long long) cache->frun,
	//       (unsigned long long) cache->fcull,
	//       (unsigned long long) cache->fstop,
	//       (unsigned long long) cache->brun,
	//       (unsigned long long) cache->bcull,
	//       (unsigned long long) cache->bstop,
	//       fnr, bnr);

	/* find out how many pages of blockdev are available */
	memset(&stats, 0, sizeof(stats));

	ret = vfs_statfs(cache->mnt->mnt_root, &stats);
	if (ret < 0) {
		if (ret == -EIO)
			cachefiles_io_error(cache, "statfs failed");
		_leave(" = %d", ret);
		return ret;
	}

	stats.f_bavail >>= cache->bshift;

	//_debug("avail %llu,%llu",
	//       (unsigned long long) stats.f_ffree,
	//       (unsigned long long) stats.f_bavail);

	/* see if there is sufficient space */
	if (stats.f_ffree > fnr)
		stats.f_ffree -= fnr;
	else
		stats.f_ffree = 0;

	if (stats.f_bavail > bnr)
		stats.f_bavail -= bnr;
	else
		stats.f_bavail = 0;

	ret = -ENOBUFS;
	if (stats.f_ffree < cache->fstop ||
	    stats.f_bavail < cache->bstop)
		goto begin_cull;

	ret = 0;
	if (stats.f_ffree < cache->fcull ||
	    stats.f_bavail < cache->bcull)
		goto begin_cull;

	if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
	    stats.f_ffree >= cache->frun &&
	    stats.f_bavail >= cache->brun &&
	    test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
	    ) {
		_debug("cease culling");
		cachefiles_state_changed(cache);
	}

	//_leave(" = 0");
	return 0;

begin_cull:
	if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
		_debug("### CULL CACHE ###");
		cachefiles_state_changed(cache);
	}

	_leave(" = %d", ret);
	return ret;
}
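/*
 * Summary of the thresholds tested above (not from the source), assuming the
 * usual ordering fstop < fcull < frun and bstop < bcull < brun:
 *  - below a stop limit:  return -ENOBUFS and ask the daemon to cull;
 *  - below a cull limit:  return 0, but ask the daemon to start culling;
 *  - back above both run limits while culling: clear CACHEFILES_CULLING and
 *    tell the daemon that culling may cease.
 */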
/*
 * Handle retransmission and deferred ACK/abort generation.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	ktime_t now;

	rxrpc_see_call(call);

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	if (call->state == RXRPC_CALL_COMPLETE) {
		del_timer_sync(&call->timer);
		rxrpc_notify_socket(call);
		goto out_put;
	}

	now = ktime_get_real();
	if (ktime_before(call->expire_at, now)) {
		rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		if (call->ackr_reason) {
			rxrpc_send_ack_packet(call, false);
			goto recheck_state;
		}
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
		rxrpc_send_ack_packet(call, true);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE) {
		__rxrpc_queue_call(call);
		goto out;
	}

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
}
/* * bind a local address to an RxRPC socket */ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr; struct rxrpc_local *local; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); u16 service_id; int ret; _enter("%p,%p,%d", rx, saddr, len); ret = rxrpc_validate_address(rx, srx, len); if (ret < 0) goto error; service_id = srx->srx_service; lock_sock(&rx->sk); switch (rx->sk.sk_state) { case RXRPC_UNBOUND: rx->srx = *srx; local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx); if (IS_ERR(local)) { ret = PTR_ERR(local); goto error_unlock; } if (service_id) { write_lock(&local->services_lock); if (rcu_access_pointer(local->service)) goto service_in_use; rx->local = local; rcu_assign_pointer(local->service, rx); write_unlock(&local->services_lock); rx->sk.sk_state = RXRPC_SERVER_BOUND; } else { rx->local = local; rx->sk.sk_state = RXRPC_CLIENT_BOUND; } break; case RXRPC_SERVER_BOUND: ret = -EINVAL; if (service_id == 0) goto error_unlock; ret = -EADDRINUSE; if (service_id == rx->srx.srx_service) goto error_unlock; ret = -EINVAL; srx->srx_service = rx->srx.srx_service; if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0) goto error_unlock; rx->second_service = service_id; rx->sk.sk_state = RXRPC_SERVER_BOUND2; break; default: ret = -EINVAL; goto error_unlock; } release_sock(&rx->sk); _leave(" = 0"); return 0; service_in_use: write_unlock(&local->services_lock); rxrpc_put_local(local); ret = -EADDRINUSE; error_unlock: release_sock(&rx->sk); error: _leave(" = %d", ret); return ret; }
/* * Perform retransmission of NAK'd and unack'd packets. */ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now) { struct rxrpc_skb_priv *sp; struct sk_buff *skb; rxrpc_seq_t cursor, seq, top; ktime_t max_age, oldest, ack_ts; int ix; u8 annotation, anno_type, retrans = 0, unacked = 0; _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); max_age = ktime_sub_ms(now, rxrpc_resend_timeout); spin_lock_bh(&call->lock); cursor = call->tx_hard_ack; top = call->tx_top; ASSERT(before_eq(cursor, top)); if (cursor == top) goto out_unlock; /* Scan the packet list without dropping the lock and decide which of * the packets in the Tx buffer we're going to resend and what the new * resend timeout will be. */ oldest = now; for (seq = cursor + 1; before_eq(seq, top); seq++) { ix = seq & RXRPC_RXTX_BUFF_MASK; annotation = call->rxtx_annotations[ix]; anno_type = annotation & RXRPC_TX_ANNO_MASK; annotation &= ~RXRPC_TX_ANNO_MASK; if (anno_type == RXRPC_TX_ANNO_ACK) continue; skb = call->rxtx_buffer[ix]; rxrpc_see_skb(skb, rxrpc_skb_tx_seen); sp = rxrpc_skb(skb); if (anno_type == RXRPC_TX_ANNO_UNACK) { if (ktime_after(skb->tstamp, max_age)) { if (ktime_before(skb->tstamp, oldest)) oldest = skb->tstamp; continue; } if (!(annotation & RXRPC_TX_ANNO_RESENT)) unacked++; } /* Okay, we need to retransmit a packet. */ call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation; retrans++; trace_rxrpc_retransmit(call, seq, annotation | anno_type, ktime_to_ns(ktime_sub(skb->tstamp, max_age))); } call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout); if (unacked) rxrpc_congestion_timeout(call); /* If there was nothing that needed retransmission then it's likely * that an ACK got lost somewhere. Send a ping to find out instead of * retransmitting data. */ if (!retrans) { rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); spin_unlock_bh(&call->lock); ack_ts = ktime_sub(now, call->acks_latest_ts); if (ktime_to_ns(ack_ts) < call->peer->rtt) goto out; rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, rxrpc_propose_ack_ping_for_lost_ack); rxrpc_send_ack_packet(call, true); goto out; } /* Now go through the Tx window and perform the retransmissions. We * have to drop the lock for each send. If an ACK comes in whilst the * lock is dropped, it may clear some of the retransmission markers for * packets that it soft-ACKs. */ for (seq = cursor + 1; before_eq(seq, top); seq++) { ix = seq & RXRPC_RXTX_BUFF_MASK; annotation = call->rxtx_annotations[ix]; anno_type = annotation & RXRPC_TX_ANNO_MASK; if (anno_type != RXRPC_TX_ANNO_RETRANS) continue; skb = call->rxtx_buffer[ix]; rxrpc_get_skb(skb, rxrpc_skb_tx_got); spin_unlock_bh(&call->lock); if (rxrpc_send_data_packet(call, skb, true) < 0) { rxrpc_free_skb(skb, rxrpc_skb_tx_freed); return; } if (rxrpc_is_client_call(call)) rxrpc_expose_client_call(call); rxrpc_free_skb(skb, rxrpc_skb_tx_freed); spin_lock_bh(&call->lock); /* We need to clear the retransmit state, but there are two * things we need to be aware of: A new ACK/NAK might have been * received and the packet might have been hard-ACK'd (in which * case it will no longer be in the buffer). 
*/ if (after(seq, call->tx_hard_ack)) { annotation = call->rxtx_annotations[ix]; anno_type = annotation & RXRPC_TX_ANNO_MASK; if (anno_type == RXRPC_TX_ANNO_RETRANS || anno_type == RXRPC_TX_ANNO_NAK) { annotation &= ~RXRPC_TX_ANNO_MASK; annotation |= RXRPC_TX_ANNO_UNACK; } annotation |= RXRPC_TX_ANNO_RESENT; call->rxtx_annotations[ix] = annotation; } if (after(call->tx_hard_ack, seq)) seq = call->tx_hard_ack; } out_unlock: spin_unlock_bh(&call->lock); out: _leave(""); }
/*
 * check the permissions on an AFS file
 * - AFS ACLs are attached to directories only; a file is governed by its
 *   parent directory's ACL
 */
int afs_permission(struct inode *inode, int mask)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	afs_access_t uninitialized_var(access);
	struct key *key;
	int ret;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	_enter("{{%x:%u},%lx},%x,",
	       vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		return PTR_ERR(key);
	}

	/* if the callback promise has lapsed, check with the server again to
	 * get a new promise (and fresh status) */
	if (!vnode->cb_promised) {
		_debug("not promised");
		ret = afs_vnode_fetch_status(vnode, NULL, key);
		if (ret < 0)
			goto error;
		_debug("new promise [fl=%lx]", vnode->flags);
	}

	/* check the permits to see what access this key has been granted */
	ret = afs_check_permit(vnode, key, &access);
	if (ret < 0)
		goto error;

	/* interpret the access mask against the requested permission mask */
	_debug("REQ %x ACC %x on %s",
	       mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");

	if (S_ISDIR(inode->i_mode)) {
		if (mask & MAY_EXEC) {
			if (!(access & AFS_ACE_LOOKUP))
				goto permission_denied;
		} else if (mask & MAY_READ) {
			if (!(access & AFS_ACE_READ))
				goto permission_denied;
		} else if (mask & MAY_WRITE) {
			if (!(access & (AFS_ACE_DELETE |	/* rmdir, unlink, rename from */
					AFS_ACE_INSERT |	/* create, mkdir, symlink, rename to */
					AFS_ACE_WRITE)))	/* chmod */
				goto permission_denied;
		} else {
			BUG();
		}
	} else {
		if (!(access & AFS_ACE_LOOKUP))
			goto permission_denied;
		if (mask & (MAY_EXEC | MAY_READ)) {
			if (!(access & AFS_ACE_READ))
				goto permission_denied;
		} else if (mask & MAY_WRITE) {
			if (!(access & AFS_ACE_WRITE))
				goto permission_denied;
		}
	}

	key_put(key);
	ret = generic_permission(inode, mask);
	_leave(" = %d", ret);
	return ret;

permission_denied:
	ret = -EACCES;
error:
	key_put(key);
	_leave(" = %d", ret);
	return ret;
}
/*
 * check whether the given key is permitted to access the vnode's
 * authoritative (parent) directory, and if so, with what access mask
 */
static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
			    afs_access_t *_access)
{
	struct afs_permits *permits;
	struct afs_permit *permit;
	struct afs_vnode *auth_vnode;
	bool valid;
	int loop, ret;

	_enter("{%x:%u},%x",
	       vnode->fid.vid, vnode->fid.vnode, key_serial(key));

	auth_vnode = afs_get_auth_inode(vnode, key);
	if (IS_ERR(auth_vnode)) {
		*_access = 0;
		_leave(" = %ld", PTR_ERR(auth_vnode));
		return PTR_ERR(auth_vnode);
	}

	ASSERT(S_ISDIR(auth_vnode->vfs_inode.i_mode));

	/* check the cached permits to see if we've got one for this key yet;
	 * anonymous access only needs the cached anonymous mask */
	if (key == auth_vnode->volume->cell->anonymous_key) {
		_debug("anon");
		*_access = auth_vnode->status.anon_access;
		valid = true;
	} else {
		valid = false;
		rcu_read_lock();
		permits = rcu_dereference(auth_vnode->permits);
		if (permits) {
			permit = permits->permits;
			for (loop = permits->count; loop > 0; loop--) {
				if (permit->key == key) {
					_debug("found in cache");
					*_access = permit->access_mask;
					valid = true;
					break;
				}
				permit++;
			}
		}
		rcu_read_unlock();
	}

	if (!valid) {
		/* no cached permit - ask the server for the status, which
		 * also tells us what access this key actually has */
		_debug("no valid permit");
		set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
		ret = afs_vnode_fetch_status(vnode, auth_vnode, key);
		if (ret < 0) {
			iput(&auth_vnode->vfs_inode);
			*_access = 0;
			_leave(" = %d", ret);
			return ret;
		}
		*_access = vnode->status.caller_access;
	}

	iput(&auth_vnode->vfs_inode);
	_leave(" = 0 [access %x]", *_access);
	return 0;
}
/* * connection-level Rx packet processor */ static int rxrpc_process_event(struct rxrpc_connection *conn, struct sk_buff *skb, u32 *_abort_code) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); __be32 tmp; u32 serial; int loop, ret; if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) { kleave(" = -ECONNABORTED [%u]", conn->state); return -ECONNABORTED; } serial = ntohl(sp->hdr.serial); _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, serial); switch (sp->hdr.type) { case RXRPC_PACKET_TYPE_ABORT: if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0) return -EPROTO; _proto("Rx ABORT %%%u { ac=%d }", serial, ntohl(tmp)); conn->state = RXRPC_CONN_REMOTELY_ABORTED; rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, ntohl(tmp)); return -ECONNABORTED; case RXRPC_PACKET_TYPE_CHALLENGE: if (conn->security) return conn->security->respond_to_challenge( conn, skb, _abort_code); return -EPROTO; case RXRPC_PACKET_TYPE_RESPONSE: if (!conn->security) return -EPROTO; ret = conn->security->verify_response(conn, skb, _abort_code); if (ret < 0) return ret; ret = conn->security->init_connection_security(conn); if (ret < 0) return ret; conn->security->prime_packet_security(conn); read_lock_bh(&conn->lock); spin_lock(&conn->state_lock); if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) { conn->state = RXRPC_CONN_SERVER; for (loop = 0; loop < RXRPC_MAXCALLS; loop++) rxrpc_call_is_secure(conn->channels[loop]); } spin_unlock(&conn->state_lock); read_unlock_bh(&conn->lock); return 0; default: _leave(" = -EPROTO [%u]", sp->hdr.type); return -EPROTO; } }
/* * Create an inode for a dynamic root directory or an autocell dynamic * automount dir. */ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) { struct afs_iget_data data; struct afs_super_info *as; struct afs_vnode *vnode; struct inode *inode; static atomic_t afs_autocell_ino; _enter(""); as = sb->s_fs_info; if (as->volume) { data.volume = as->volume; data.fid.vid = as->volume->vid; } if (root) { data.fid.vnode = 1; data.fid.unique = 1; } else { data.fid.vnode = atomic_inc_return(&afs_autocell_ino); data.fid.unique = 0; } inode = iget5_locked(sb, data.fid.vnode, afs_iget5_pseudo_dir_test, afs_iget5_set, &data); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } _debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }", inode, inode->i_ino, data.fid.vid, data.fid.vnode, data.fid.unique); vnode = AFS_FS_I(inode); /* there shouldn't be an existing inode */ BUG_ON(!(inode->i_state & I_NEW)); inode->i_size = 0; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; if (root) { inode->i_op = &afs_dynroot_inode_operations; inode->i_fop = &afs_dynroot_file_operations; } else { inode->i_op = &afs_autocell_inode_operations; } set_nlink(inode, 2); inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; inode->i_ctime.tv_sec = get_seconds(); inode->i_ctime.tv_nsec = 0; inode->i_atime = inode->i_mtime = inode->i_ctime; inode->i_blocks = 0; inode_set_iversion_raw(inode, 0); inode->i_generation = 0; set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags); if (!root) { set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); inode->i_flags |= S_AUTOMOUNT; } inode->i_flags |= S_NOATIME; unlock_new_inode(inode); _leave(" = %p", inode); return inode; }
/* * reject packets through the local endpoint */ void rxrpc_reject_packets(struct work_struct *work) { union { struct sockaddr sa; struct sockaddr_in sin; } sa; struct rxrpc_skb_priv *sp; struct rxrpc_header hdr; struct rxrpc_local *local; struct sk_buff *skb; struct msghdr msg; struct kvec iov[2]; size_t size; __be32 code; local = container_of(work, struct rxrpc_local, rejecter); rxrpc_get_local(local); _enter("%d", local->debug_id); iov[0].iov_base = &hdr; iov[0].iov_len = sizeof(hdr); iov[1].iov_base = &code; iov[1].iov_len = sizeof(code); size = sizeof(hdr) + sizeof(code); msg.msg_name = &sa; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; memset(&sa, 0, sizeof(sa)); sa.sa.sa_family = local->srx.transport.family; switch (sa.sa.sa_family) { case AF_INET: msg.msg_namelen = sizeof(sa.sin); break; default: msg.msg_namelen = 0; break; } memset(&hdr, 0, sizeof(hdr)); hdr.type = RXRPC_PACKET_TYPE_ABORT; while ((skb = skb_dequeue(&local->reject_queue))) { sp = rxrpc_skb(skb); switch (sa.sa.sa_family) { case AF_INET: sa.sin.sin_port = udp_hdr(skb)->source; sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; code = htonl(skb->priority); hdr.epoch = sp->hdr.epoch; hdr.cid = sp->hdr.cid; hdr.callNumber = sp->hdr.callNumber; hdr.serviceId = sp->hdr.serviceId; hdr.flags = sp->hdr.flags; hdr.flags ^= RXRPC_CLIENT_INITIATED; hdr.flags &= RXRPC_CLIENT_INITIATED; kernel_sendmsg(local->socket, &msg, iov, 2, size); break; default: break; } rxrpc_free_skb(skb); rxrpc_put_local(local); } rxrpc_put_local(local); _leave(""); }
/* * validate a vnode/inode * - there are several things we need to check * - parent dir data changes (rm, rmdir, rename, mkdir, create, link, * symlink) * - parent dir metadata changed (security changes) * - dentry data changed (write, truncate) * - dentry metadata changed (security changes) */ int afs_validate(struct afs_vnode *vnode, struct key *key) { time64_t now = ktime_get_real_seconds(); bool valid = false; int ret; _enter("{v={%x:%u} fl=%lx},%x", vnode->fid.vid, vnode->fid.vnode, vnode->flags, key_serial(key)); /* Quickly check the callback state. Ideally, we'd use read_seqbegin * here, but we have no way to pass the net namespace to the RCU * cleanup for the server record. */ read_seqlock_excl(&vnode->cb_lock); if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break) { vnode->cb_s_break = vnode->cb_interest->server->cb_s_break; } else if (!test_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags) && !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) && vnode->cb_expires_at - 10 > now) { valid = true; } } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { valid = true; } read_sequnlock_excl(&vnode->cb_lock); if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) clear_nlink(&vnode->vfs_inode); if (valid) goto valid; mutex_lock(&vnode->validate_lock); /* if the promise has expired, we need to check the server again to get * a new promise - note that if the (parent) directory's metadata was * changed then the security may be different and we may no longer have * access */ if (!test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) { _debug("not promised"); ret = afs_fetch_status(vnode, key); if (ret < 0) { if (ret == -ENOENT) { set_bit(AFS_VNODE_DELETED, &vnode->flags); ret = -ESTALE; } goto error_unlock; } _debug("new promise [fl=%lx]", vnode->flags); } if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { _debug("file already deleted"); ret = -ESTALE; goto error_unlock; } /* if the vnode's data version number changed then its contents are * different */ if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) afs_zap_data(vnode); clear_bit(AFS_VNODE_DIR_MODIFIED, &vnode->flags); mutex_unlock(&vnode->validate_lock); valid: _leave(" = 0"); return 0; error_unlock: mutex_unlock(&vnode->validate_lock); _leave(" = %d", ret); return ret; }
/* * create a vfsmount to be automounted */ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) { struct afs_super_info *super; struct vfsmount *mnt; struct afs_vnode *vnode; struct page *page; char *devname, *options; bool rwpath = false; int ret; _enter("{%pd}", mntpt); BUG_ON(!d_inode(mntpt)); ret = -ENOMEM; devname = (char *) get_zeroed_page(GFP_KERNEL); if (!devname) goto error_no_devname; options = (char *) get_zeroed_page(GFP_KERNEL); if (!options) goto error_no_options; vnode = AFS_FS_I(d_inode(mntpt)); if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) { /* if the directory is a pseudo directory, use the d_name */ static const char afs_root_cell[] = ":root.cell."; unsigned size = mntpt->d_name.len; ret = -ENOENT; if (size < 2 || size > AFS_MAXCELLNAME) goto error_no_page; if (mntpt->d_name.name[0] == '.') { devname[0] = '#'; memcpy(devname + 1, mntpt->d_name.name, size - 1); memcpy(devname + size, afs_root_cell, sizeof(afs_root_cell)); rwpath = true; } else { devname[0] = '%'; memcpy(devname + 1, mntpt->d_name.name, size); memcpy(devname + size + 1, afs_root_cell, sizeof(afs_root_cell)); } } else { /* read the contents of the AFS special symlink */ loff_t size = i_size_read(d_inode(mntpt)); char *buf; ret = -EINVAL; if (size > PAGE_SIZE - 1) goto error_no_page; page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL); if (IS_ERR(page)) { ret = PTR_ERR(page); goto error_no_page; } ret = -EIO; if (PageError(page)) goto error; buf = kmap_atomic(page); memcpy(devname, buf, size); kunmap_atomic(buf); put_page(page); page = NULL; } /* work out what options we want */ super = AFS_FS_S(mntpt->d_sb); memcpy(options, "cell=", 5); strcpy(options + 5, super->volume->cell->name); if (super->volume->type == AFSVL_RWVOL || rwpath) strcat(options, ",rwpath"); /* try and do the mount */ _debug("--- attempting mount %s -o %s ---", devname, options); mnt = vfs_submount(mntpt, &afs_fs_type, devname, options); _debug("--- mount result %p ---", mnt); free_page((unsigned long) devname); free_page((unsigned long) options); _leave(" = %p", mnt); return mnt; error: put_page(page); error_no_page: free_page((unsigned long) options); error_no_options: free_page((unsigned long) devname); error_no_devname: _leave(" = %d", ret); return ERR_PTR(ret); }
/* * send a message forming part of a client call through an RxRPC socket * - caller holds the socket locked * - the socket may be either a client socket or a server socket */ int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx, struct rxrpc_transport *trans, struct msghdr *msg, size_t len) { struct rxrpc_conn_bundle *bundle; enum rxrpc_command cmd; struct rxrpc_call *call; unsigned long user_call_ID = 0; struct key *key; __be16 service_id; u32 abort_code = 0; int ret; _enter(""); ASSERT(trans != NULL); ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code, false); if (ret < 0) return ret; bundle = NULL; if (trans) { service_id = rx->service_id; if (msg->msg_name) { struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) msg->msg_name; service_id = htons(srx->srx_service); } key = rx->key; if (key && !rx->key->payload.data) key = NULL; bundle = rxrpc_get_bundle(rx, trans, key, service_id, GFP_KERNEL); if (IS_ERR(bundle)) return PTR_ERR(bundle); } call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, abort_code == 0, GFP_KERNEL); if (trans) rxrpc_put_bundle(trans, bundle); if (IS_ERR(call)) { _leave(" = %ld", PTR_ERR(call)); return PTR_ERR(call); } _debug("CALL %d USR %lx ST %d on CONN %p", call->debug_id, call->user_call_ID, call->state, call->conn); if (call->state >= RXRPC_CALL_COMPLETE) { /* it's too late for this call */ ret = -ESHUTDOWN; } else if (cmd == RXRPC_CMD_SEND_ABORT) { rxrpc_send_abort(call, abort_code); } else if (cmd != RXRPC_CMD_SEND_DATA) { ret = -EINVAL; } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) { /* request phase complete for this client call */ ret = -EPROTO; } else { ret = rxrpc_send_data(iocb, rx, call, msg, len); } rxrpc_put_call(call); _leave(" = %d", ret); return ret; }
/* * receive a message from an RxRPC socket * - we need to be careful about two or more threads calling recvmsg * simultaneously */ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct rxrpc_skb_priv *sp; struct rxrpc_call *call = NULL, *continue_call = NULL; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); struct sk_buff *skb; long timeo; int copy, ret, ullen, offset, copied = 0; u32 abort_code; DEFINE_WAIT(wait); _enter(",,,%zu,%d", len, flags); if (flags & (MSG_OOB | MSG_TRUNC)) return -EOPNOTSUPP; ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long); timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT); msg->msg_flags |= MSG_MORE; lock_sock(&rx->sk); for (;;) { /* return immediately if a client socket has no outstanding * calls */ if (RB_EMPTY_ROOT(&rx->calls)) { if (copied) goto out; if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) { release_sock(&rx->sk); if (continue_call) rxrpc_put_call(continue_call); return -ENODATA; } } /* get the next message on the Rx queue */ skb = skb_peek(&rx->sk.sk_receive_queue); if (!skb) { /* nothing remains on the queue */ if (copied && (msg->msg_flags & MSG_PEEK || timeo == 0)) goto out; /* wait for a message to turn up */ release_sock(&rx->sk); prepare_to_wait_exclusive(rx->sk.sk_sleep, &wait, TASK_INTERRUPTIBLE); ret = sock_error(&rx->sk); if (ret) goto wait_error; if (skb_queue_empty(&rx->sk.sk_receive_queue)) { if (signal_pending(current)) goto wait_interrupted; timeo = schedule_timeout(timeo); } finish_wait(rx->sk.sk_sleep, &wait); lock_sock(&rx->sk); continue; } peek_next_packet: sp = rxrpc_skb(skb); call = sp->call; ASSERT(call != NULL); _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]); /* make sure we wait for the state to be updated in this call */ spin_lock_bh(&call->lock); spin_unlock_bh(&call->lock); if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) { _debug("packet from released call"); if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) BUG(); rxrpc_free_skb(skb); continue; } /* determine whether to continue last data receive */ if (continue_call) { _debug("maybe cont"); if (call != continue_call || skb->mark != RXRPC_SKB_MARK_DATA) { release_sock(&rx->sk); rxrpc_put_call(continue_call); _leave(" = %d [noncont]", copied); return copied; } } rxrpc_get_call(call); /* copy the peer address and timestamp */ if (!continue_call) { if (msg->msg_name && msg->msg_namelen > 0) memcpy(msg->msg_name, &call->conn->trans->peer->srx, sizeof(call->conn->trans->peer->srx)); sock_recv_timestamp(msg, &rx->sk, skb); } /* receive the message */ if (skb->mark != RXRPC_SKB_MARK_DATA) goto receive_non_data_message; _debug("recvmsg DATA #%u { %d, %d }", ntohl(sp->hdr.seq), skb->len, sp->offset); if (!continue_call) { /* only set the control data once per recvmsg() */ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, ullen, &call->user_call_ID); if (ret < 0) goto copy_error; ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags)); } ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv); ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1); call->rx_data_recv = ntohl(sp->hdr.seq); ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten); offset = sp->offset; copy = skb->len - offset; if (copy > len - copied) copy = len - copied; if (skb->ip_summed == CHECKSUM_UNNECESSARY) { ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy); } else { ret = skb_copy_and_csum_datagram_iovec(skb, offset, msg->msg_iov); if (ret == -EINVAL) goto csum_copy_error; } if (ret < 0) goto copy_error; /* handle 
piecemeal consumption of data packets */ _debug("copied %d+%d", copy, copied); offset += copy; copied += copy; if (!(flags & MSG_PEEK)) sp->offset = offset; if (sp->offset < skb->len) { _debug("buffer full"); ASSERTCMP(copied, ==, len); break; } /* we transferred the whole data packet */ if (sp->hdr.flags & RXRPC_LAST_PACKET) { _debug("last"); if (call->conn->out_clientflag) { /* last byte of reply received */ ret = copied; goto terminal_message; } /* last bit of request received */ if (!(flags & MSG_PEEK)) { _debug("eat packet"); if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) BUG(); rxrpc_free_skb(skb); } msg->msg_flags &= ~MSG_MORE; break; } /* move on to the next data message */ _debug("next"); if (!continue_call) continue_call = sp->call; else rxrpc_put_call(call); call = NULL; if (flags & MSG_PEEK) { _debug("peek next"); skb = skb->next; if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue) break; goto peek_next_packet; } _debug("eat packet"); if (skb_dequeue(&rx->sk.sk_receive_queue) != skb) BUG(); rxrpc_free_skb(skb); }
/* * queue a packet for transmission, set the resend timer and attempt * to send the packet immediately */ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb, bool last) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); int ret; _net("queue skb %p [%d]", skb, call->acks_head); ASSERT(call->acks_window != NULL); call->acks_window[call->acks_head] = (unsigned long) skb; smp_wmb(); call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1); if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) { _debug("________awaiting reply/ACK__________"); write_lock_bh(&call->state_lock); switch (call->state) { case RXRPC_CALL_CLIENT_SEND_REQUEST: call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; break; case RXRPC_CALL_SERVER_ACK_REQUEST: call->state = RXRPC_CALL_SERVER_SEND_REPLY; if (!last) break; case RXRPC_CALL_SERVER_SEND_REPLY: call->state = RXRPC_CALL_SERVER_AWAIT_ACK; break; default: break; } write_unlock_bh(&call->state_lock); } _proto("Tx DATA %%%u { #%u }", ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); sp->need_resend = 0; sp->resend_at = jiffies + rxrpc_resend_timeout * HZ; if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) { _debug("run timer"); call->resend_timer.expires = sp->resend_at; add_timer(&call->resend_timer); } /* attempt to cancel the rx-ACK timer, deferring reply transmission if * we're ACK'ing the request phase of an incoming call */ ret = -EAGAIN; if (try_to_del_timer_sync(&call->ack_timer) >= 0) { /* the packet may be freed by rxrpc_process_call() before this * returns */ ret = rxrpc_send_packet(call->conn->trans, skb); _net("sent skb %p", skb); } else { _debug("failed to delete ACK timer"); } if (ret < 0) { _debug("need instant resend %d", ret); sp->need_resend = 1; rxrpc_instant_resend(call); } _leave(""); }
/* * extract a krb5 principal */ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, const __be32 **_xdr, unsigned *_toklen) { const __be32 *xdr = *_xdr; unsigned toklen = *_toklen, n_parts, loop, tmp; /* there must be at least one name, and at least #names+1 length * words */ if (toklen <= 12) return -EINVAL; _enter(",{%x,%x,%x},%u", ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), toklen); n_parts = ntohl(*xdr++); toklen -= 4; if (n_parts <= 0 || n_parts > AFSTOKEN_K5_COMPONENTS_MAX) return -EINVAL; princ->n_name_parts = n_parts; if (toklen <= (n_parts + 1) * 4) return -EINVAL; princ->name_parts = kcalloc(sizeof(char *), n_parts, GFP_KERNEL); if (!princ->name_parts) return -ENOMEM; for (loop = 0; loop < n_parts; loop++) { if (toklen < 4) return -EINVAL; tmp = ntohl(*xdr++); toklen -= 4; if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) return -EINVAL; if (tmp > toklen) return -EINVAL; princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); if (!princ->name_parts[loop]) return -ENOMEM; memcpy(princ->name_parts[loop], xdr, tmp); princ->name_parts[loop][tmp] = 0; tmp = (tmp + 3) & ~3; toklen -= tmp; xdr += tmp >> 2; } if (toklen < 4) return -EINVAL; tmp = ntohl(*xdr++); toklen -= 4; if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) return -EINVAL; if (tmp > toklen) return -EINVAL; princ->realm = kmalloc(tmp + 1, GFP_KERNEL); if (!princ->realm) return -ENOMEM; memcpy(princ->realm, xdr, tmp); princ->realm[tmp] = 0; tmp = (tmp + 3) & ~3; toklen -= tmp; xdr += tmp >> 2; _debug("%s/...@%s", princ->name_parts[0], princ->realm); *_xdr = xdr; *_toklen = toklen; _leave(" = 0 [toklen=%u]", toklen); return 0; }
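/*
 * For reference (not part of the source): the XDR layout consumed by
 * rxrpc_krb5_decode_principal() above, reconstructed from the parsing steps.
 * Each length word is a big-endian 32-bit count and each string is padded to
 * a 4-byte boundary ((len + 3) & ~3):
 *
 *	[0]	n_parts (1 .. AFSTOKEN_K5_COMPONENTS_MAX)
 *	repeated n_parts times:
 *		[n]	length of name part (1 .. AFSTOKEN_STRING_MAX)
 *		[...]	name part bytes, padded
 *	[m]	length of realm (1 .. AFSTOKEN_K5_REALM_MAX)
 *	[...]	realm bytes, padded
 */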
/* * initialise an object * - check the specified object's parent to see if we can make use of it * immediately to do a creation * - we may need to start the process of creating a parent and we need to wait * for the parent's lookup and creation to complete if it's not there yet * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the * leaf-most cookies of the object and all its children */ static void fscache_initialise_object(struct fscache_object *object) { struct fscache_object *parent; _enter(""); ASSERT(object->cookie != NULL); ASSERT(object->cookie->parent != NULL); ASSERT(list_empty(&object->work.link)); if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) | (1 << FSCACHE_OBJECT_EV_RELEASE) | (1 << FSCACHE_OBJECT_EV_RETIRE) | (1 << FSCACHE_OBJECT_EV_WITHDRAW))) { _debug("abort init %lx", object->events); spin_lock(&object->lock); object->state = FSCACHE_OBJECT_ABORT_INIT; spin_unlock(&object->lock); return; } spin_lock(&object->cookie->lock); spin_lock_nested(&object->cookie->parent->lock, 1); parent = object->parent; if (!parent) { _debug("no parent"); set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); } else { spin_lock(&object->lock); spin_lock_nested(&parent->lock, 1); _debug("parent %s", fscache_object_states[parent->state]); if (parent->state >= FSCACHE_OBJECT_DYING) { _debug("bad parent"); set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events); } else if (parent->state < FSCACHE_OBJECT_AVAILABLE) { _debug("wait"); /* we may get woken up in this state by child objects * binding on to us, so we need to make sure we don't * add ourself to the list multiple times */ if (list_empty(&object->dep_link)) { object->cache->ops->grab_object(object); list_add(&object->dep_link, &parent->dependents); /* fscache_acquire_non_index_cookie() uses this * to wake the chain up */ if (parent->state == FSCACHE_OBJECT_INIT) fscache_enqueue_object(parent); } } else { _debug("go"); parent->n_ops++; parent->n_obj_ops++; object->lookup_jif = jiffies; object->state = FSCACHE_OBJECT_LOOKING_UP; set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); } spin_unlock(&parent->lock); spin_unlock(&object->lock); } spin_unlock(&object->cookie->parent->lock); spin_unlock(&object->cookie->lock); _leave(""); }
/* * attach the data from a bunch of pages on an inode to a call */ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, struct kvec *iov) { struct page *pages[8]; unsigned count, n, loop, offset, to; pgoff_t first = call->first, last = call->last; int ret; _enter(""); offset = call->first_offset; call->first_offset = 0; do { _debug("attach %lx-%lx", first, last); count = last - first + 1; if (count > ARRAY_SIZE(pages)) count = ARRAY_SIZE(pages); n = find_get_pages_contig(call->mapping, first, count, pages); ASSERTCMP(n, ==, count); loop = 0; do { msg->msg_flags = 0; to = PAGE_SIZE; if (first + loop >= last) to = call->last_to; else msg->msg_flags = MSG_MORE; iov->iov_base = kmap(pages[loop]) + offset; iov->iov_len = to - offset; offset = 0; _debug("- range %u-%u%s", offset, to, msg->msg_flags ? " [more]" : ""); iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, iov, 1, to - offset); /* have to change the state *before* sending the last * packet as RxRPC might give us the reply before it * returns from sending the request */ if (first + loop >= last) call->state = AFS_CALL_AWAIT_REPLY; ret = rxrpc_kernel_send_data(call->rxcall, msg, to - offset); kunmap(pages[loop]); if (ret < 0) break; } while (++loop < count); first += count; for (loop = 0; loop < count; loop++) put_page(pages[loop]); if (ret < 0) break; } while (first <= last); _leave(" = %d", ret); return ret; }
/* * process events that have been sent to an object's state machine * - initiates parent lookup * - does object lookup * - does object creation * - does object recycling and retirement * - does object withdrawal */ static void fscache_object_state_machine(struct fscache_object *object) { enum fscache_object_state new_state; ASSERT(object != NULL); _enter("{OBJ%x,%s,%lx}", object->debug_id, fscache_object_states[object->state], object->events); switch (object->state) { /* wait for the parent object to become ready */ case FSCACHE_OBJECT_INIT: object->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED); fscache_initialise_object(object); goto done; /* look up the object metadata on disk */ case FSCACHE_OBJECT_LOOKING_UP: fscache_lookup_object(object); goto lookup_transit; /* create the object metadata on disk */ case FSCACHE_OBJECT_CREATING: fscache_lookup_object(object); goto lookup_transit; /* handle an object becoming available; start pending * operations and queue dependent operations for processing */ case FSCACHE_OBJECT_AVAILABLE: fscache_object_available(object); goto active_transit; /* normal running state */ case FSCACHE_OBJECT_ACTIVE: goto active_transit; /* update the object metadata on disk */ case FSCACHE_OBJECT_UPDATING: clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); fscache_stat(&fscache_n_updates_run); object->cache->ops->update_object(object); goto active_transit; /* handle an object dying during lookup or creation */ case FSCACHE_OBJECT_LC_DYING: object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); object->cache->ops->lookup_complete(object); spin_lock(&object->lock); object->state = FSCACHE_OBJECT_DYING; if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags)) wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING); spin_unlock(&object->lock); fscache_done_parent_op(object); /* wait for completion of all active operations on this object * and the death of all child objects of this object */ case FSCACHE_OBJECT_DYING: dying: clear_bit(FSCACHE_OBJECT_EV_CLEARED, &object->events); spin_lock(&object->lock); _debug("dying OBJ%x {%d,%d}", object->debug_id, object->n_ops, object->n_children); if (object->n_ops == 0 && object->n_children == 0) { object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_CLEARED); object->event_mask |= (1 << FSCACHE_OBJECT_EV_WITHDRAW) | (1 << FSCACHE_OBJECT_EV_RETIRE) | (1 << FSCACHE_OBJECT_EV_RELEASE) | (1 << FSCACHE_OBJECT_EV_ERROR); } else { object->event_mask &= ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | (1 << FSCACHE_OBJECT_EV_RETIRE) | (1 << FSCACHE_OBJECT_EV_RELEASE) | (1 << FSCACHE_OBJECT_EV_ERROR)); object->event_mask |= 1 << FSCACHE_OBJECT_EV_CLEARED; } spin_unlock(&object->lock); fscache_enqueue_dependents(object); goto terminal_transit; /* handle an abort during initialisation */ case FSCACHE_OBJECT_ABORT_INIT: _debug("handle abort init %lx", object->events); object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); spin_lock(&object->lock); fscache_dequeue_object(object); object->state = FSCACHE_OBJECT_DYING; if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags)) wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING); spin_unlock(&object->lock); goto dying; /* handle the netfs releasing an object and possibly marking it * obsolete too */ case FSCACHE_OBJECT_RELEASING: case FSCACHE_OBJECT_RECYCLING: object->event_mask &= ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | (1 << FSCACHE_OBJECT_EV_RETIRE) | (1 << FSCACHE_OBJECT_EV_RELEASE) | (1 << FSCACHE_OBJECT_EV_ERROR)); fscache_release_object(object); 
spin_lock(&object->lock); object->state = FSCACHE_OBJECT_DEAD; spin_unlock(&object->lock); fscache_stat(&fscache_n_object_dead); goto terminal_transit; /* handle the parent cache of this object being withdrawn from * active service */ case FSCACHE_OBJECT_WITHDRAWING: object->event_mask &= ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) | (1 << FSCACHE_OBJECT_EV_RETIRE) | (1 << FSCACHE_OBJECT_EV_RELEASE) | (1 << FSCACHE_OBJECT_EV_ERROR)); fscache_withdraw_object(object); spin_lock(&object->lock); object->state = FSCACHE_OBJECT_DEAD; spin_unlock(&object->lock); fscache_stat(&fscache_n_object_dead); goto terminal_transit; /* complain about the object being woken up once it is * deceased */ case FSCACHE_OBJECT_DEAD: printk(KERN_ERR "FS-Cache:" " Unexpected event in dead state %lx\n", object->events & object->event_mask); BUG(); default: printk(KERN_ERR "FS-Cache: Unknown object state %u\n", object->state); BUG(); } /* determine the transition from a lookup state */ lookup_transit: switch (fls(object->events & object->event_mask) - 1) { case FSCACHE_OBJECT_EV_WITHDRAW: case FSCACHE_OBJECT_EV_RETIRE: case FSCACHE_OBJECT_EV_RELEASE: case FSCACHE_OBJECT_EV_ERROR: new_state = FSCACHE_OBJECT_LC_DYING; goto change_state; case FSCACHE_OBJECT_EV_REQUEUE: goto done; case -1: goto done; /* sleep until event */ default: goto unsupported_event; } /* determine the transition from an active state */ active_transit: switch (fls(object->events & object->event_mask) - 1) { case FSCACHE_OBJECT_EV_WITHDRAW: case FSCACHE_OBJECT_EV_RETIRE: case FSCACHE_OBJECT_EV_RELEASE: case FSCACHE_OBJECT_EV_ERROR: new_state = FSCACHE_OBJECT_DYING; goto change_state; case FSCACHE_OBJECT_EV_UPDATE: new_state = FSCACHE_OBJECT_UPDATING; goto change_state; case -1: new_state = FSCACHE_OBJECT_ACTIVE; goto change_state; /* sleep until event */ default: goto unsupported_event; } /* determine the transition from a terminal state */ terminal_transit: switch (fls(object->events & object->event_mask) - 1) { case FSCACHE_OBJECT_EV_WITHDRAW: new_state = FSCACHE_OBJECT_WITHDRAWING; goto change_state; case FSCACHE_OBJECT_EV_RETIRE: new_state = FSCACHE_OBJECT_RECYCLING; goto change_state; case FSCACHE_OBJECT_EV_RELEASE: new_state = FSCACHE_OBJECT_RELEASING; goto change_state; case FSCACHE_OBJECT_EV_ERROR: new_state = FSCACHE_OBJECT_WITHDRAWING; goto change_state; case FSCACHE_OBJECT_EV_CLEARED: new_state = FSCACHE_OBJECT_DYING; goto change_state; case -1: goto done; /* sleep until event */ default: goto unsupported_event; } change_state: spin_lock(&object->lock); object->state = new_state; spin_unlock(&object->lock); done: _leave(" [->%s]", fscache_object_states[object->state]); return; unsupported_event: printk(KERN_ERR "FS-Cache:" " Unsupported event %lx [mask %lx] in state %s\n", object->events, object->event_mask, fscache_object_states[object->state]); BUG(); }
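/*
 * Rough transition summary for fscache_object_state_machine() above, derived
 * from the handlers and the lookup/active/terminal transit tables (event
 * priority is decided by fls(), so the highest-numbered pending event wins):
 *
 *	INIT -> LOOKING_UP / CREATING -> AVAILABLE -> ACTIVE <-> UPDATING
 *	LOOKING_UP / CREATING --(error, release, retire, withdraw)--> LC_DYING
 *	LC_DYING / ABORT_INIT -> DYING
 *	DYING --(release / retire / withdraw / error, once idle)-->
 *		RELEASING / RECYCLING / WITHDRAWING -> DEAD
 */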
/* * deliver messages to a call */ static void afs_deliver_to_call(struct afs_call *call) { struct sk_buff *skb; bool last; u32 abort_code; int ret; _enter(""); while ((call->state == AFS_CALL_AWAIT_REPLY || call->state == AFS_CALL_AWAIT_OP_ID || call->state == AFS_CALL_AWAIT_REQUEST || call->state == AFS_CALL_AWAIT_ACK) && (skb = skb_dequeue(&call->rx_queue))) { switch (skb->mark) { case RXRPC_SKB_MARK_DATA: _debug("Rcv DATA"); last = rxrpc_kernel_is_data_last(skb); ret = call->type->deliver(call, skb, last); switch (ret) { case 0: if (last && call->state == AFS_CALL_AWAIT_REPLY) call->state = AFS_CALL_COMPLETE; break; case -ENOTCONN: abort_code = RX_CALL_DEAD; goto do_abort; case -ENOTSUPP: abort_code = RX_INVALID_OPERATION; goto do_abort; default: abort_code = RXGEN_CC_UNMARSHAL; if (call->state != AFS_CALL_AWAIT_REPLY) abort_code = RXGEN_SS_UNMARSHAL; do_abort: rxrpc_kernel_abort_call(call->rxcall, abort_code); call->error = ret; call->state = AFS_CALL_ERROR; break; } afs_data_delivered(skb); skb = NULL; continue; case RXRPC_SKB_MARK_FINAL_ACK: _debug("Rcv ACK"); call->state = AFS_CALL_COMPLETE; break; case RXRPC_SKB_MARK_BUSY: _debug("Rcv BUSY"); call->error = -EBUSY; call->state = AFS_CALL_BUSY; break; case RXRPC_SKB_MARK_REMOTE_ABORT: abort_code = rxrpc_kernel_get_abort_code(skb); call->error = call->type->abort_to_error(abort_code); call->state = AFS_CALL_ABORTED; _debug("Rcv ABORT %u -> %d", abort_code, call->error); break; case RXRPC_SKB_MARK_NET_ERROR: call->error = -rxrpc_kernel_get_error_number(skb); call->state = AFS_CALL_ERROR; _debug("Rcv NET ERROR %d", call->error); break; case RXRPC_SKB_MARK_LOCAL_ERROR: call->error = -rxrpc_kernel_get_error_number(skb); call->state = AFS_CALL_ERROR; _debug("Rcv LOCAL ERROR %d", call->error); break; default: BUG(); break; } afs_free_skb(skb); } /* make sure the queue is empty if the call is done with (we might have * aborted the call early because of an unmarshalling error) */ if (call->state >= AFS_CALL_COMPLETE) { while ((skb = skb_dequeue(&call->rx_queue))) afs_free_skb(skb); if (call->incoming) afs_end_call(call); } _leave(""); }
/* * fetch file status from the volume * - don't issue a fetch if: * - the changed bit is not set and there's a valid callback * - there are any outstanding ops that will fetch the status * - TODO implement local caching */ int afs_vnode_fetch_status(afs_vnode_t *vnode) { afs_server_t *server; int ret; DECLARE_WAITQUEUE(myself,current); _enter("%s,{%u,%u,%u}",vnode->volume->vlocation->vldb.name, vnode->fid.vid,vnode->fid.vnode,vnode->fid.unique); if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) { _leave(" [unchanged]"); return 0; } if (vnode->flags & AFS_VNODE_DELETED) { _leave(" [deleted]"); return -ENOENT; } spin_lock(&vnode->lock); if (!(vnode->flags & AFS_VNODE_CHANGED)) { spin_unlock(&vnode->lock); _leave(" [unchanged]"); return 0; } if (vnode->update_cnt>0) { /* someone else started a fetch */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&vnode->update_waitq,&myself); /* wait for the status to be updated */ for (;;) { if (!(vnode->flags & AFS_VNODE_CHANGED)) break; if (vnode->flags & AFS_VNODE_DELETED) break; /* it got updated and invalidated all before we saw it */ if (vnode->update_cnt==0) { remove_wait_queue(&vnode->update_waitq,&myself); set_current_state(TASK_RUNNING); goto get_anyway; } spin_unlock(&vnode->lock); schedule(); set_current_state(TASK_UNINTERRUPTIBLE); spin_lock(&vnode->lock); } remove_wait_queue(&vnode->update_waitq,&myself); spin_unlock(&vnode->lock); set_current_state(TASK_RUNNING); return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0; } get_anyway: /* okay... we're going to have to initiate the op */ vnode->update_cnt++; spin_unlock(&vnode->lock); /* merge AFS status fetches and clear outstanding callback on this vnode */ do { /* pick a server to query */ ret = afs_volume_pick_fileserver(vnode->volume,&server); if (ret<0) return ret; _debug("USING SERVER: %08x\n",ntohl(server->addr.s_addr)); ret = afs_rxfs_fetch_file_status(server,vnode,NULL); } while (!afs_volume_release_fileserver(vnode->volume,server,ret)); /* adjust the flags */ afs_vnode_finalise_status_update(vnode,server,ret); _leave(" = %d",ret); return ret; } /* end afs_vnode_fetch_status() */
/* * inode retrieval */ inline int afs_iget(struct super_block *sb, struct afs_fid *fid, struct inode **_inode) { struct afs_iget_data data = { .fid = *fid }; struct afs_super_info *as; struct afs_vnode *vnode; struct inode *inode; int ret; _enter(",{%u,%u,%u},,", fid->vid, fid->vnode, fid->unique); as = sb->s_fs_info; data.volume = as->volume; inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set, &data); if (!inode) { _leave(" = -ENOMEM"); return -ENOMEM; } vnode = AFS_FS_I(inode); /* deal with an existing inode */ if (!(inode->i_state & I_NEW)) { ret = afs_vnode_fetch_status(vnode); if (ret==0) *_inode = inode; else iput(inode); _leave(" = %d", ret); return ret; } #ifdef AFS_CACHING_SUPPORT /* set up caching before reading the status, as fetch-status reads the * first page of symlinks to see if they're really mntpts */ cachefs_acquire_cookie(vnode->volume->cache, NULL, vnode, &vnode->cache); #endif /* okay... it's a new inode */ inode->i_flags |= S_NOATIME; vnode->flags |= AFS_VNODE_CHANGED; ret = afs_inode_fetch_status(inode); if (ret<0) goto bad_inode; /* success */ unlock_new_inode(inode); *_inode = inode; _leave(" = 0 [CB { v=%u x=%lu t=%u }]", vnode->cb_version, vnode->cb_timeout.timo_jif, vnode->cb_type); return 0; /* failure */ bad_inode: make_bad_inode(inode); unlock_new_inode(inode); iput(inode); _leave(" = %d [bad]", ret); return ret; } /* end afs_iget() */
/*
 * handle an error report received on the local UDP endpoint
 */
void rxrpc_UDP_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct rxrpc_transport *trans;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;
	__be32 addr;
	__be16 port;

	_enter("%p{%d}", sk, local->debug_id);

	skb = skb_dequeue(&sk->sk_error_queue);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}

	rxrpc_new_skb(skb);

	serr = SKB_EXT_ERR(skb);
	addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
	port = serr->port;

	_net("Rx UDP Error from %pI4:%hu", &addr, ntohs(port));
	_debug("Msg l:%d d:%d", skb->len, skb->data_len);

	peer = rxrpc_find_peer(local, addr, port);
	if (IS_ERR(peer)) {
		rxrpc_free_skb(skb);
		_leave(" [no peer]");
		return;
	}

	trans = rxrpc_find_transport(local, peer);
	if (!trans) {
		rxrpc_put_peer(peer);
		rxrpc_free_skb(skb);
		_leave(" [no trans]");
		return;
	}

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED
	    ) {
		u32 mtu = serr->ee.ee_info;

		_net("Rx Received ICMP Fragmentation Needed (%d)", mtu);

		/* wind down the local interface MTU */
		if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
			peer->if_mtu = mtu;
			_net("I/F MTU %u", mtu);
		}

		if (mtu == 0)
			mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);

		if (mtu == 0) {
			/* they didn't give us a size, estimate one from the
			 * interface MTU */
			mtu = peer->if_mtu;
			if (mtu > 1500) {
				mtu >>= 1;
				if (mtu < 1500)
					mtu = 1500;
			} else {