Example #1
0
/*
 * A workitem scheduled with (serial == 1) is strictly serialised not only
 * with itself, but also with all others scheduled this way.
 *
 * For now there is only one static serialised queue, but in the future more
 * might be added, and even dynamic creation of serialised queues might be
 * supported.
 */
void
cfs_wi_schedule(cfs_workitem_t *wi)
{
        cfs_wi_sched_t *sched = cfs_wi_to_sched(wi);

        LASSERT (!cfs_in_interrupt()); /* because we use plain spinlock */
        LASSERT (!sched->ws_shuttingdown);

        cfs_wi_sched_lock(sched);

        if (!wi->wi_scheduled) {
                LASSERT (cfs_list_empty(&wi->wi_list));

                wi->wi_scheduled = 1;
                if (!wi->wi_running) {
                        cfs_list_add_tail(&wi->wi_list, &sched->ws_runq);
#ifdef __KERNEL__
                        cfs_waitq_signal(&sched->ws_waitq);
#endif
                } else {
                        cfs_list_add(&wi->wi_list, &sched->ws_rerunq);
                }
        }

        LASSERT (!cfs_list_empty(&wi->wi_list));
        cfs_wi_sched_unlock(sched);
        return;
}
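
A minimal caller sketch for the serialised path follows. cfs_wi_init() and the exact contract of the action callback are assumptions based on the libcfs workitem API, not shown above; check libcfs_workitem.h for the real signatures.

/* Hedged usage sketch; illustrative only. */
static int
example_wi_action(cfs_workitem_t *wi)
{
        /* one unit of work, strictly serialised with other (serial == 1)
         * workitems */
        return 0;
}

static cfs_workitem_t example_wi;

static void
example_schedule(void)
{
        cfs_wi_init(&example_wi, NULL, example_wi_action); /* assumed helper */
        cfs_wi_schedule(&example_wi); /* lands on ws_runq, or ws_rerunq if
                                       * the item is running right now */
}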
Example #2
0
/* must be called with resource lock held */
static int
lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
{
	struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];

	/* NB we are passed an allocated, but inactive md.
	 * if we return success, caller may lnet_md_unlink() it.
	 * otherwise caller may only lnet_md_free() it.
	 */
	/* This implementation doesn't know how to create START events or
	 * disable END events.  Best to LASSERT our caller is compliant so
	 * we find out quickly...  */
	/* TODO - re-evaluate what should be here in light of
	 * the removal of the start and end events; maybe we
	 * shouldn't even allow LNET_EQ_NONE!
	 * LASSERT (eq == NULL);
	 */
	if (!LNetHandleIsInvalid(eq_handle)) {
		md->md_eq = lnet_handle2eq(&eq_handle);

		if (md->md_eq == NULL)
			return -ENOENT;

		(*md->md_eq->eq_refs[cpt])++;
	}

	lnet_res_lh_initialize(container, &md->md_lh);

	LASSERT(cfs_list_empty(&md->md_list));
	cfs_list_add(&md->md_list, &container->rec_active);

	return 0;
}
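
A hedged caller sketch of the contract spelled out in the comments above: the resource lock must be held, and on failure the md may only be freed, never unlinked. The lnet_res_lock()/lnet_res_unlock() and lnet_md_free() names are assumptions here.

/* Illustrative only; names other than lnet_md_link() are assumed. */
static int
example_link_md(lnet_libmd_t *md, lnet_handle_eq_t eqh, int cpt)
{
        int rc;

        lnet_res_lock(cpt);
        rc = lnet_md_link(md, eqh, cpt);
        lnet_res_unlock(cpt);

        if (rc != 0)
                lnet_md_free(md);       /* not lnet_md_unlink() on failure */
        return rc;
}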
Example #3
0
static int
usocklnd_send_tx_immediately(usock_conn_t *conn, usock_tx_t *tx)
{
        int           rc;
        int           rc2;
        int           partial_send = 0;
        usock_peer_t *peer         = conn->uc_peer;

        LASSERT (peer != NULL);

        /* usocklnd_enqueue_tx() turned it on for us */
        LASSERT(conn->uc_sending);

        //counter_imm_start++;
        rc = usocklnd_send_tx(conn, tx);
        if (rc == 0) { /* partial send or connection closed */
                pthread_mutex_lock(&conn->uc_lock);
                cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
                conn->uc_sending = 0;
                pthread_mutex_unlock(&conn->uc_lock);
                partial_send = 1;
        } else {
                usocklnd_destroy_tx(peer->up_ni, tx);
                /* NB: lnetmsg was finalized, so we *must* return 0 */

                if (rc < 0) { /* real error */
                        usocklnd_conn_kill(conn);
                        return 0;
                }

                /* rc == 1: tx was sent completely */
                rc = 0; /* let's say to caller 'Ok' */
                //counter_imm_complete++;
        }

        pthread_mutex_lock(&conn->uc_lock);
        conn->uc_sending = 0;

        /* schedule write handler */
        if (partial_send ||
            (conn->uc_state == UC_READY &&
             (!cfs_list_empty(&conn->uc_tx_list) ||
              !cfs_list_empty(&conn->uc_zcack_list)))) {
                conn->uc_tx_deadline =
                        cfs_time_shift(usock_tuns.ut_timeout);
                conn->uc_tx_flag = 1;
                rc2 = usocklnd_add_pollrequest(conn, POLL_TX_SET_REQUEST, POLLOUT);
                if (rc2 != 0)
                        usocklnd_conn_kill_locked(conn);
                else
                        usocklnd_wakeup_pollthread(conn->uc_pt_idx);
        }

        pthread_mutex_unlock(&conn->uc_lock);

        return rc;
}
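
The branches above all hang off usocklnd_send_tx()'s three-way result; a restatement of that convention as a small helper (the enum and function are illustrative, not driver code):

enum example_send_disposition {
        EX_SEND_DONE,           /* rc == 1: tx went out completely      */
        EX_SEND_REQUEUE,        /* rc == 0: partial send or conn closed */
        EX_SEND_KILL_CONN,      /* rc <  0: real error; lnetmsg already
                                 * finalized, so callers still return 0 */
};

static enum example_send_disposition
example_classify_send_rc(int rc)
{
        if (rc < 0)
                return EX_SEND_KILL_CONN;
        return rc == 0 ? EX_SEND_REQUEUE : EX_SEND_DONE;
}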
Example #4
0
int
lwt_init ()
{
        int     i;
        int     j;

        for (i = 0; i < cfs_num_online_cpus(); i++)
                if (lwt_cpus[i].lwtc_current_page != NULL)
                        return (-EALREADY);

        LASSERT (!lwt_enabled);

        /* NULL pointers, zero scalars */
        memset (lwt_cpus, 0, sizeof (lwt_cpus));
        lwt_pages_per_cpu =
                LWT_MEMORY / (cfs_num_online_cpus() * PAGE_CACHE_SIZE);

        for (i = 0; i < cfs_num_online_cpus(); i++)
                for (j = 0; j < lwt_pages_per_cpu; j++) {
                        struct page *page = alloc_page(GFP_KERNEL);
                        lwt_page_t  *lwtp;

                        if (page == NULL) {
                                CERROR ("Can't allocate page\n");
                                lwt_fini ();
                                return (-ENOMEM);
                        }

                        LIBCFS_ALLOC(lwtp, sizeof (*lwtp));
                        if (lwtp == NULL) {
                                CERROR ("Can't allocate lwtp\n");
                                __free_page(page);
                                lwt_fini ();
                                return (-ENOMEM);
                        }

                        lwtp->lwtp_page = page;
                        lwtp->lwtp_events = page_address(page);
                        memset(lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);

                        if (j == 0) {
                                CFS_INIT_LIST_HEAD (&lwtp->lwtp_list);
                                lwt_cpus[i].lwtc_current_page = lwtp;
                        } else {
                                cfs_list_add (&lwtp->lwtp_list,
                                    &lwt_cpus[i].lwtc_current_page->lwtp_list);
                        }
                }

        lwt_enabled = 1;
        cfs_mb();

        LWT_EVENT(0,0,0,0);

        return (0);
}
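
A worked instance of the sizing arithmetic above; all three constants here are illustrative assumptions, not the build's real values.

/* Assume LWT_MEMORY = 16 MB, 8 online CPUs, PAGE_CACHE_SIZE = 4096:
 *
 *     lwt_pages_per_cpu = 16777216 / (8 * 4096) = 512
 *
 * i.e. 512 pages (2 MB) of trace buffer per CPU, allocated one page at
 * a time so a mid-loop failure can be unwound by lwt_fini(). */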
Example #5
0
/*
 * Add object to list of dirty objects in tx handle.
 */
static void
osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
{
	if (!cfs_list_empty(&obj->oo_sa_linkage))
		return;

	down(&oh->ot_sa_lock);
	write_lock(&obj->oo_attr_lock);
	if (likely(cfs_list_empty(&obj->oo_sa_linkage)))
		cfs_list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
	write_unlock(&obj->oo_attr_lock);
	up(&oh->ot_sa_lock);
}
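
The unlocked emptiness test followed by the locked re-test above is the usual double-checked fast path; a standalone pthread-flavoured sketch of the same shape (illustrative only, not part of the osd code):

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example_dirty_add(cfs_list_t *link, cfs_list_t *head)
{
        if (!cfs_list_empty(link))              /* cheap unlocked check */
                return;

        pthread_mutex_lock(&example_lock);
        if (cfs_list_empty(link))               /* re-check under lock */
                cfs_list_add(link, head);
        pthread_mutex_unlock(&example_lock);
}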
Example #6
0
/** records that a write is in flight */
void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
{
	struct ll_inode_info *lli = ll_i2info(club->cob_inode);

	ENTRY;
	spin_lock(&lli->lli_lock);
	lli->lli_flags |= LLIF_SOM_DIRTY;
	if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
		cfs_list_add(&page->cpg_pending_linkage,
			     &club->cob_pending_list);
	spin_unlock(&lli->lli_lock);
	EXIT;
}
Example #7
0
struct shrinker *set_shrinker(int seeks, shrink_callback cb)
{
	struct shrinker *s = (struct shrinker *)
		kmalloc(sizeof(struct shrinker), __GFP_ZERO);

	if (s) {
		s->cb = cb;
		s->seeks = seeks;
		s->nr = 2;
		spin_lock(&shrinker_guard);
		cfs_list_add(&s->list, &shrinker_hdr);
		spin_unlock(&shrinker_guard);
	}

	return s;
}
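
A hedged usage sketch for set_shrinker(); the shrink_callback signature is an assumption modelled on the old two-argument Linux shrinker interface and may not match this emulation layer.

/* Assumed: typedef int (*shrink_callback)(int nr_to_scan,
 *                                         unsigned int gfp_mask); */
static int
example_shrink(int nr_to_scan, unsigned int gfp_mask)
{
        /* drop up to nr_to_scan cached objects; return how many remain */
        return 0;
}

static void
example_register(void)
{
        struct shrinker *s = set_shrinker(2 /* seeks */, example_shrink);

        if (s == NULL)
                CERROR("can't register shrinker\n");
}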
Example #8
0
static void lcw_cb(ulong_ptr_t data)
{
        struct lc_watchdog *lcw = (struct lc_watchdog *)data;
        ENTRY;

        if (lcw->lcw_state != LC_WATCHDOG_ENABLED) {
                EXIT;
                return;
        }

        lcw->lcw_state = LC_WATCHDOG_EXPIRED;

        cfs_spin_lock_bh(&lcw->lcw_lock);
        LASSERT(cfs_list_empty(&lcw->lcw_list));

        cfs_spin_lock_bh(&lcw_pending_timers_lock);
        lcw->lcw_refcount++; /* +1 for pending list */
        cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
        cfs_waitq_signal(&lcw_event_waitq);

        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
        cfs_spin_unlock_bh(&lcw->lcw_lock);
        EXIT;
}
Example #9
0
static int lcw_dispatch_main(void *data)
{
        int                 rc = 0;
        unsigned long       flags;
        struct lc_watchdog *lcw;
        CFS_LIST_HEAD      (zombies);

        ENTRY;

        cfs_daemonize("lc_watchdogd");

        SIGNAL_MASK_LOCK(current, flags);
        sigfillset(&current->blocked);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, flags);

        cfs_complete(&lcw_start_completion);

        while (1) {
                int dumplog = 1;

                cfs_wait_event_interruptible(lcw_event_waitq,
                                             is_watchdog_fired(), rc);
                CDEBUG(D_INFO, "Watchdog got woken up...\n");
                if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) {
                        CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n");

                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
                        rc = !cfs_list_empty(&lcw_pending_timers);
                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
                        if (rc) {
                                CERROR("pending timers list was not empty at "
                                       "time of watchdog dispatch shutdown\n");
                        }
                        break;
                }

                cfs_spin_lock_bh(&lcw_pending_timers_lock);
                while (!cfs_list_empty(&lcw_pending_timers)) {
                        int is_dumplog;

                        lcw = cfs_list_entry(lcw_pending_timers.next,
                                             struct lc_watchdog, lcw_list);
                        /* +1 ref for callback to make sure lcw wouldn't be
                         * deleted after releasing lcw_pending_timers_lock */
                        lcw->lcw_refcount++;
                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);

                        /* lock ordering */
                        cfs_spin_lock_bh(&lcw->lcw_lock);
                        cfs_spin_lock_bh(&lcw_pending_timers_lock);

                        if (cfs_list_empty(&lcw->lcw_list)) {
                                /* already removed from pending list */
                                lcw->lcw_refcount--; /* -1 ref for callback */
                                if (lcw->lcw_refcount == 0)
                                        cfs_list_add(&lcw->lcw_list, &zombies);
                                cfs_spin_unlock_bh(&lcw->lcw_lock);
                                /* still hold lcw_pending_timers_lock */
                                continue;
                        }

                        cfs_list_del_init(&lcw->lcw_list);
                        lcw->lcw_refcount--; /* -1 ref for pending list */

                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
                        cfs_spin_unlock_bh(&lcw->lcw_lock);

                        CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
                               lcw->lcw_pid);
                        lcw_dump_stack(lcw);

                        is_dumplog = lcw->lcw_callback == lc_watchdog_dumplog;
                        if (lcw->lcw_state != LC_WATCHDOG_DISABLED &&
                            (dumplog || !is_dumplog)) {
                                lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
                                if (dumplog && is_dumplog)
                                        dumplog = 0;
                        }

                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
                        lcw->lcw_refcount--; /* -1 ref for callback */
                        if (lcw->lcw_refcount == 0)
                                cfs_list_add(&lcw->lcw_list, &zombies);
                }
                cfs_spin_unlock_bh(&lcw_pending_timers_lock);

                while (!cfs_list_empty(&zombies)) {
                        lcw = cfs_list_entry(zombies.next,
                                             struct lc_watchdog, lcw_list);
                        cfs_list_del(&lcw->lcw_list);
                        LIBCFS_FREE(lcw, sizeof(*lcw));
                }
        }

        cfs_complete(&lcw_stop_completion);

        RETURN(rc);
}
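
The inner loop above leans on a take-ref/drop-lock/re-lock-in-order dance because lcw->lcw_lock must be taken before lcw_pending_timers_lock. A generic restatement of the pattern with pthreads (illustrative only; the struct and names are not from the watchdog code):

struct example_obj {
        pthread_mutex_t  lock;
        int              refcount;
        cfs_list_t       link;
};

static pthread_mutex_t example_list_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example_revalidate(struct example_obj *obj)
{
        /* entered with example_list_lock held */
        obj->refcount++;                        /* pin obj */
        pthread_mutex_unlock(&example_list_lock);

        pthread_mutex_lock(&obj->lock);         /* obj lock first...  */
        pthread_mutex_lock(&example_list_lock); /* ...then list lock  */

        if (cfs_list_empty(&obj->link)) {
                /* raced with removal: drop the pinning ref and bail */
                return;
        }
        /* still on the list: safe to process and unlink */
}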
Example #10
0
/* All actions that we need after sending hello on passive conn:
 * 1) Cope with 1st easy case: conn is already linked to a peer
 * 2) Cope with 2nd easy case: remove zombie conn
 * 3) Resolve race:
 *    a) find the peer
 *    b) link the conn to the peer if conn[idx] is empty
 *    c) if the conn[idx] isn't empty and is in READY state,
 *       remove the conn as duplicated
 *    d) if the conn[idx] isn't empty and isn't in READY state,
 *       override conn[idx] with the conn
 */
int
usocklnd_passiveconn_hellosent(usock_conn_t *conn)
{
        usock_conn_t    *conn2;
        usock_peer_t    *peer;
        cfs_list_t       tx_list;
        cfs_list_t       zcack_list;
        int              idx;
        int              rc = 0;

        /* almost nothing to do if conn is already linked to peer hash table */
        if (conn->uc_peer != NULL)
                goto passive_hellosent_done;

        /* conn->uc_peer == NULL, so the conn isn't accessible via
         * peer hash list, so nobody can touch the conn but us */

        if (conn->uc_ni == NULL) /* remove zombie conn */
                goto passive_hellosent_connkill;

        /* all code below is race resolution, because normally
         * passive conn is linked to peer just after receiving hello */
        CFS_INIT_LIST_HEAD (&tx_list);
        CFS_INIT_LIST_HEAD (&zcack_list);

        /* conn is passive and isn't linked to any peer,
           so its tx and zc_ack lists have to be empty */
        LASSERT (cfs_list_empty(&conn->uc_tx_list) &&
                 cfs_list_empty(&conn->uc_zcack_list) &&
                 conn->uc_sending == 0);

        rc = usocklnd_find_or_create_peer(conn->uc_ni, conn->uc_peerid, &peer);
        if (rc)
                return rc;

        idx = usocklnd_type2idx(conn->uc_type);

        /* try to link conn to peer */
        pthread_mutex_lock(&peer->up_lock);
        if (peer->up_conns[idx] == NULL) {
                usocklnd_link_conn_to_peer(conn, peer, idx);
                usocklnd_conn_addref(conn);
                conn->uc_peer = peer;
                usocklnd_peer_addref(peer);
        } else {
                conn2 = peer->up_conns[idx];
                pthread_mutex_lock(&conn2->uc_lock);

                if (conn2->uc_state == UC_READY) {
                        /* conn2 is in READY state, so conn is "duplicated" */
                        pthread_mutex_unlock(&conn2->uc_lock);
                        pthread_mutex_unlock(&peer->up_lock);
                        usocklnd_peer_decref(peer);
                        goto passive_hellosent_connkill;
                }

                /* uc_state != UC_READY => switch conn and conn2 */
                /* Relink txs and zc_acks from conn2 to conn.
                 * We're sure that nobody but us can access the conn,
                 * nevertheless we take the mutex: if we're ever wrong,
                 * a deadlock is easier to spot than a corrupted list */
                cfs_list_add(&tx_list, &conn2->uc_tx_list);
                cfs_list_del_init(&conn2->uc_tx_list);
                cfs_list_add(&zcack_list, &conn2->uc_zcack_list);
                cfs_list_del_init(&conn2->uc_zcack_list);

                pthread_mutex_lock(&conn->uc_lock);
                cfs_list_add_tail(&conn->uc_tx_list, &tx_list);
                cfs_list_del_init(&tx_list);
                cfs_list_add_tail(&conn->uc_zcack_list, &zcack_list);
                cfs_list_del_init(&zcack_list);
                conn->uc_peer = peer;
                pthread_mutex_unlock(&conn->uc_lock);

                conn2->uc_peer = NULL; /* make conn2 zombie */
                pthread_mutex_unlock(&conn2->uc_lock);
                usocklnd_conn_decref(conn2);

                usocklnd_link_conn_to_peer(conn, peer, idx);
                usocklnd_conn_addref(conn);
                conn->uc_peer = peer;
        }

        lnet_ni_decref(conn->uc_ni);
        conn->uc_ni = NULL;
        pthread_mutex_unlock(&peer->up_lock);
        usocklnd_peer_decref(peer);

  passive_hellosent_done:
        /* safely transition to UC_READY state */
        /* rc == 0 */
        pthread_mutex_lock(&conn->uc_lock);
        if (conn->uc_state != UC_DEAD) {
                usocklnd_rx_ksmhdr_state_transition(conn);

                /* we're ready to receive incoming packets and maybe
                   already have something to transmit */
                LASSERT (conn->uc_sending == 0);
                if ( cfs_list_empty(&conn->uc_tx_list) &&
                     cfs_list_empty(&conn->uc_zcack_list) ) {
                        conn->uc_tx_flag = 0;
                        rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
                                                 POLLIN);
                } else {
                        conn->uc_tx_deadline =
                                cfs_time_shift(usock_tuns.ut_timeout);
                        conn->uc_tx_flag = 1;
                        rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
                                                      POLLIN | POLLOUT);
                }

                if (rc == 0)
                        conn->uc_state = UC_READY;
        }
        pthread_mutex_unlock(&conn->uc_lock);
        return rc;

  passive_hellosent_connkill:
        usocklnd_conn_kill(conn);
        return 0;
}
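
Both relinking blocks above use the O(1) whole-list move idiom: add the new head *into* the old ring, then del_init the old head, and every element has changed lists. A standalone sketch (illustrative):

static void
example_move_whole_list(cfs_list_t *from, cfs_list_t *to)
{
        /* 'to' need not be pre-initialised (cfs_list_add() overwrites
         * its pointers); both calls are O(1), and 'from' is left empty
         * and reusable afterwards */
        cfs_list_add(to, from);
        cfs_list_del_init(from);
}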
Example #11
0
int
usocklnd_write_handler(usock_conn_t *conn)
{
        usock_tx_t   *tx;
        int           ret;
        int           rc = 0;
        int           state;
        usock_peer_t *peer;
        lnet_ni_t    *ni;

        pthread_mutex_lock(&conn->uc_lock); /* like membar */
        state = conn->uc_state;
        pthread_mutex_unlock(&conn->uc_lock);

        switch (state) {
        case UC_CONNECTING:
                /* hello_tx has already been initialized
                 * in usocklnd_create_active_conn() */
                usocklnd_conn_new_state(conn, UC_SENDING_HELLO);
                /* fall through */

        case UC_SENDING_HELLO:
                rc = usocklnd_send_tx(conn, conn->uc_tx_hello);
                if (rc <= 0) /* error or partial send or connection closed */
                        break;

                /* tx with hello was sent successfully */
                usocklnd_destroy_tx(NULL, conn->uc_tx_hello);
                conn->uc_tx_hello = NULL;

                if (conn->uc_activeflag == 1) /* active conn */
                        rc = usocklnd_activeconn_hellosent(conn);
                else                          /* passive conn */
                        rc = usocklnd_passiveconn_hellosent(conn);

                break;

        case UC_READY:
                pthread_mutex_lock(&conn->uc_lock);

                peer = conn->uc_peer;
                LASSERT (peer != NULL);
                ni = peer->up_ni;

                if (cfs_list_empty(&conn->uc_tx_list) &&
                    cfs_list_empty(&conn->uc_zcack_list)) {
                        LASSERT(usock_tuns.ut_fair_limit > 1);
                        pthread_mutex_unlock(&conn->uc_lock);
                        return 0;
                }

                tx = usocklnd_try_piggyback(&conn->uc_tx_list,
                                            &conn->uc_zcack_list);
                if (tx != NULL)
                        conn->uc_sending = 1;
                else
                        rc = -ENOMEM;

                pthread_mutex_unlock(&conn->uc_lock);

                if (rc)
                        break;

                rc = usocklnd_send_tx(conn, tx);
                if (rc == 0) { /* partial send or connection closed */
                        pthread_mutex_lock(&conn->uc_lock);
                        cfs_list_add(&tx->tx_list, &conn->uc_tx_list);
                        conn->uc_sending = 0;
                        pthread_mutex_unlock(&conn->uc_lock);
                        break;
                }
                if (rc < 0) { /* real error */
                        usocklnd_destroy_tx(ni, tx);
                        break;
                }

                /* rc == 1: tx was sent completely */
                usocklnd_destroy_tx(ni, tx);

                pthread_mutex_lock(&conn->uc_lock);
                conn->uc_sending = 0;
                if (conn->uc_state != UC_DEAD &&
                    cfs_list_empty(&conn->uc_tx_list) &&
                    cfs_list_empty(&conn->uc_zcack_list)) {
                        conn->uc_tx_flag = 0;
                        ret = usocklnd_add_pollrequest(conn,
                                                       POLL_TX_SET_REQUEST, 0);
                        if (ret)
                                rc = ret;
                }
                pthread_mutex_unlock(&conn->uc_lock);

                break;

        case UC_DEAD:
                break;

        default:
                LBUG();
        }

        if (rc < 0)
                usocklnd_conn_kill(conn);

        return rc;
}
Example #12
0
/* All actions that we need after receiving hello on active conn:
 * 1) Schedule removing if we're zombie
 * 2) Restart active conn if we lost the race
 * 3) Else: update RX part to receive KSM header
 */
int
usocklnd_activeconn_hellorecv(usock_conn_t *conn)
{
        int                rc    = 0;
        ksock_hello_msg_t *hello = conn->uc_rx_hello;
        usock_peer_t      *peer  = conn->uc_peer;

        /* Active conn with peer==NULL is zombie.
         * Don't try to link it to peer because the conn
         * has already had a chance to proceed at the beginning */
        if (peer == NULL) {
                LASSERT(cfs_list_empty(&conn->uc_tx_list) &&
                        cfs_list_empty(&conn->uc_zcack_list));

                usocklnd_conn_kill(conn);
                return 0;
        }

        peer->up_last_alive = cfs_time_current();

        /* peer says that we lost the race */
        if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
                /* Start new active conn, relink txs and zc_acks from
                 * the conn to new conn, schedule removing the conn.
                 * Actually, we're expecting that a passive conn will
                 * make us zombie soon and take care of our txs and
                 * zc_acks */

                cfs_list_t tx_list, zcack_list;
                usock_conn_t *conn2;
                int idx = usocklnd_type2idx(conn->uc_type);

                CFS_INIT_LIST_HEAD (&tx_list);
                CFS_INIT_LIST_HEAD (&zcack_list);

                /* Block usocklnd_send() to check peer->up_conns[idx]
                 * and to enqueue more txs */
                pthread_mutex_lock(&peer->up_lock);
                pthread_mutex_lock(&conn->uc_lock);

                /* usocklnd_shutdown() could kill us */
                if (conn->uc_state == UC_DEAD) {
                        pthread_mutex_unlock(&conn->uc_lock);
                        pthread_mutex_unlock(&peer->up_lock);
                        return 0;
                }

                LASSERT (peer == conn->uc_peer);
                LASSERT (peer->up_conns[idx] == conn);

                rc = usocklnd_create_active_conn(peer, conn->uc_type, &conn2);
                if (rc) {
                        conn->uc_errored = 1;
                        pthread_mutex_unlock(&conn->uc_lock);
                        pthread_mutex_unlock(&peer->up_lock);
                        return rc;
                }

                usocklnd_link_conn_to_peer(conn2, peer, idx);
                conn2->uc_peer = peer;

                /* unlink txs and zcack from the conn */
                cfs_list_add(&tx_list, &conn->uc_tx_list);
                cfs_list_del_init(&conn->uc_tx_list);
                cfs_list_add(&zcack_list, &conn->uc_zcack_list);
                cfs_list_del_init(&conn->uc_zcack_list);

                /* link them to conn2 */
                cfs_list_add(&conn2->uc_tx_list, &tx_list);
                cfs_list_del_init(&tx_list);
                cfs_list_add(&conn2->uc_zcack_list, &zcack_list);
                cfs_list_del_init(&zcack_list);

                /* make conn zombie */
                conn->uc_peer = NULL;
                usocklnd_peer_decref(peer);

                /* schedule conn2 for processing */
                rc = usocklnd_add_pollrequest(conn2, POLL_ADD_REQUEST, POLLOUT);
                if (rc) {
                        peer->up_conns[idx] = NULL;
                        usocklnd_conn_decref(conn2); /* should destroy conn */
                } else {
                        usocklnd_conn_kill_locked(conn);
                }

                pthread_mutex_unlock(&conn->uc_lock);
                pthread_mutex_unlock(&peer->up_lock);
                usocklnd_conn_decref(conn);

        } else { /* hello->kshm_ctype != SOCKLND_CONN_NONE */
                if (conn->uc_type != usocklnd_invert_type(hello->kshm_ctype))
                        return -EPROTO;

                pthread_mutex_lock(&peer->up_lock);
                usocklnd_cleanup_stale_conns(peer, hello->kshm_src_incarnation,
                                             conn);
                pthread_mutex_unlock(&peer->up_lock);

                /* safely transition to UC_READY state */
                /* rc == 0 */
                pthread_mutex_lock(&conn->uc_lock);
                if (conn->uc_state != UC_DEAD) {
                        usocklnd_rx_ksmhdr_state_transition(conn);

                        /* POLLIN is already set because we just
                         * received hello, but maybe we have something
                         * to send? */
                        LASSERT (conn->uc_sending == 0);
                        if ( !cfs_list_empty(&conn->uc_tx_list) ||
                             !cfs_list_empty(&conn->uc_zcack_list) ) {

                                conn->uc_tx_deadline =
                                        cfs_time_shift(usock_tuns.ut_timeout);
                                conn->uc_tx_flag = 1;
                                rc = usocklnd_add_pollrequest(conn,
                                                              POLL_SET_REQUEST,
                                                              POLLIN | POLLOUT);
                        }

                        if (rc == 0)
                                conn->uc_state = UC_READY;
                }
                pthread_mutex_unlock(&conn->uc_lock);
        }

        return rc;
}
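
The -EPROTO check above relies on connection-type inversion. A hedged sketch of what usocklnd_invert_type() presumably computes, modelled on the kernel socklnd's ksocknal_invert_type(); this is an assumption, not the driver's code:

static int
example_invert_type(int type)
{
        switch (type) {
        case SOCKLND_CONN_ANY:
        case SOCKLND_CONN_CONTROL:
                return type;                    /* self-inverse         */
        case SOCKLND_CONN_BULK_IN:
                return SOCKLND_CONN_BULK_OUT;   /* our IN is peer's OUT */
        case SOCKLND_CONN_BULK_OUT:
                return SOCKLND_CONN_BULK_IN;
        default:
                return SOCKLND_CONN_NONE;
        }
}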
Example #13
0
static int mdt_reint_setattr(struct mdt_thread_info *info,
                             struct mdt_lock_handle *lhc)
{
        struct md_attr          *ma = &info->mti_attr;
        struct mdt_reint_record *rr = &info->mti_rr;
        struct ptlrpc_request   *req = mdt_info_req(info);
        struct mdt_export_data  *med = &req->rq_export->exp_mdt_data;
        struct mdt_file_data    *mfd;
        struct mdt_object       *mo;
        struct mdt_body         *repbody;
        int                      som_au, rc, rc2;
        ENTRY;

        DEBUG_REQ(D_INODE, req, "setattr "DFID" %x", PFID(rr->rr_fid1),
                  (unsigned int)ma->ma_attr.la_valid);

        if (info->mti_dlm_req)
                ldlm_request_cancel(req, info->mti_dlm_req, 0);

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        mo = mdt_object_find(info->mti_env, info->mti_mdt, rr->rr_fid1);
        if (IS_ERR(mo))
                GOTO(out, rc = PTR_ERR(mo));

	if (mdt_object_obf(mo))
		GOTO(out_put, rc = -EPERM);

        /* start a log journal handle if needed */
        if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM)) {
                if ((ma->ma_attr.la_valid & LA_SIZE) ||
                    (rr->rr_flags & MRF_OPEN_TRUNC)) {
                        /* Check write access for the O_TRUNC case */
                        if (mdt_write_read(mo) < 0)
                                GOTO(out_put, rc = -ETXTBSY);
                }
        } else if (info->mti_ioepoch &&
                   (info->mti_ioepoch->flags & MF_EPOCH_OPEN)) {
                /* Truncate case. IOEpoch is opened. */
                rc = mdt_write_get(mo);
                if (rc)
                        GOTO(out_put, rc);

                mfd = mdt_mfd_new();
                if (mfd == NULL) {
                        mdt_write_put(mo);
                        GOTO(out_put, rc = -ENOMEM);
                }

                mdt_ioepoch_open(info, mo, 0);
                repbody->ioepoch = mo->mot_ioepoch;

                mdt_object_get(info->mti_env, mo);
                mdt_mfd_set_mode(mfd, MDS_FMODE_TRUNC);
                mfd->mfd_object = mo;
                mfd->mfd_xid = req->rq_xid;

		spin_lock(&med->med_open_lock);
		cfs_list_add(&mfd->mfd_list, &med->med_open_head);
		spin_unlock(&med->med_open_lock);
                repbody->handle.cookie = mfd->mfd_handle.h_cookie;
        }

        som_au = info->mti_ioepoch && info->mti_ioepoch->flags & MF_SOM_CHANGE;
        if (som_au) {
                /* SOM Attribute update case. Find the proper mfd and update
                 * SOM attributes on the proper object. */
                LASSERT(mdt_conn_flags(info) & OBD_CONNECT_SOM);
                LASSERT(info->mti_ioepoch);

		spin_lock(&med->med_open_lock);
		mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
		if (mfd == NULL) {
			spin_unlock(&med->med_open_lock);
                        CDEBUG(D_INODE, "no handle for file close: "
                               "fid = "DFID": cookie = "LPX64"\n",
                               PFID(info->mti_rr.rr_fid1),
                               info->mti_ioepoch->handle.cookie);
                        GOTO(out_put, rc = -ESTALE);
                }
                LASSERT(mfd->mfd_mode == MDS_FMODE_SOM);
                LASSERT(!(info->mti_ioepoch->flags & MF_EPOCH_CLOSE));

                class_handle_unhash(&mfd->mfd_handle);
                cfs_list_del_init(&mfd->mfd_list);
		spin_unlock(&med->med_open_lock);

                mdt_mfd_close(info, mfd);
	} else if ((ma->ma_valid & MA_INODE) && ma->ma_attr.la_valid) {
		LASSERT((ma->ma_valid & MA_LOV) == 0);
                rc = mdt_attr_set(info, mo, ma, rr->rr_flags);
                if (rc)
                        GOTO(out_put, rc);
	} else if ((ma->ma_valid & MA_LOV) && (ma->ma_valid & MA_INODE)) {
		struct lu_buf *buf  = &info->mti_buf;
		LASSERT(ma->ma_attr.la_valid == 0);
		buf->lb_buf = ma->ma_lmm;
		buf->lb_len = ma->ma_lmm_size;
		rc = mo_xattr_set(info->mti_env, mdt_object_child(mo),
				  buf, XATTR_NAME_LOV, 0);
		if (rc)
			GOTO(out_put, rc);
	} else
		LBUG();

	/* If file data is modified, add the dirty flag */
	if (ma->ma_attr_flags & MDS_DATA_MODIFIED)
		rc = mdt_add_dirty_flag(info, mo, ma);

        ma->ma_need = MA_INODE;
        ma->ma_valid = 0;
	rc = mdt_attr_get_complex(info, mo, ma);
        if (rc != 0)
                GOTO(out_put, rc);

        mdt_pack_attr2body(info, repbody, &ma->ma_attr, mdt_object_fid(mo));

	if (info->mti_mdt->mdt_opts.mo_oss_capa &&
	    exp_connect_flags(info->mti_exp) & OBD_CONNECT_OSS_CAPA &&
	    S_ISREG(lu_object_attr(&mo->mot_obj.mo_lu)) &&
	    (ma->ma_attr.la_valid & LA_SIZE) && !som_au) {
                struct lustre_capa *capa;

                capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_OSS_DEFAULT | CAPA_OPC_OSS_TRUNC;
                rc = mo_capa_get(info->mti_env, mdt_object_child(mo), capa, 0);
                if (rc)
                        GOTO(out_put, rc);
                repbody->valid |= OBD_MD_FLOSSCAPA;
        }

        EXIT;
out_put:
        mdt_object_put(info->mti_env, mo);
out:
        if (rc == 0)
		mdt_counter_incr(req, LPROC_MDT_SETATTR);

        mdt_client_compatibility(info);
        rc2 = mdt_fix_reply(info);
        if (rc == 0)
                rc = rc2;
        return rc;
}
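
The error paths above all funnel through the GOTO() idiom: assign rc and jump, so cleanup stays centralised at the labels. A simplified sketch of what the macro amounts to (the real libcfs macro also logs the value; this reduction is an assumption):

#define EXAMPLE_GOTO(label, expr)                                       \
do {                                                                    \
        (void)(expr);           /* typically "rc = -ESOMETHING" */      \
        goto label;                                                     \
} while (0)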