Example no. 1
0
/*
 * Adapter that matches a lock-with-context callback signature.
 * The context argument is accepted but not used; this simply
 * acquires the spinlock.
 */
static void
ck_spinlock_lock_with_context(ck_spinlock_t *lock, void *context)
{
	(void)context;
	ck_spinlock_lock(lock);
}
Example no. 2
0
/*
 * Completion handler for a message that has finished (or failed) on
 * its device.  Decrements the device's active-message count and, on a
 * connection hangup, parks the message for retry after failover.
 *
 * Returns:
 *  1 - If message is added to the retry queue.
 *  0 - If message is not added to the retry queue
 */
static int
iio_msg_done(struct qnio_msg *msg)
{
    struct iio_device *device = (struct iio_device*)msg->reserved;
    struct channel *channel = device->channel;
    int retry = 0;
    int do_failover = 0;
    int error;

    ck_spinlock_lock(&device->slock);
    device->active_msg_count --;
    error = msg->hinfo.err;
    if (error == QNIOERROR_HUP) {
        /* Connection hangup: the message may be resent once failover completes. */
        nioDbg("QNIOERROR_HUP received on msgid=%ld %p",msg->hinfo.cookie, msg);
        switch (device->state) {
        case IIO_DEVICE_ACTIVE:
            /* First HUP on an active device starts the quiesce. */
            device->state = IIO_DEVICE_QUIESCE;
            /* fall through */

        case IIO_DEVICE_QUIESCE:
        case IIO_DEVICE_FAILOVER:
            /*
             * Park the message on the retry queue.  It is still counted
             * as active (the decrement above is undone) so the quiesce
             * completion check below balances out.
             */
            device->retry_msg_count ++;
            device->active_msg_count ++;
            channel->cd->chdrv_msg_resend_cleanup(msg);
            LIST_ADD(&device->retryq, &msg->lnode);
            retry = 1;
            break;

        case IIO_DEVICE_FAILED:
            /* Device already declared dead; nothing to retry. */
            break;

        default:
            nioDbg("Unknown device state");
            break;
        }
    } else if (error) {
        nioDbg("message failed with error %d", error);
    }

    /*
     * Quiesce is complete once every outstanding message is sitting on
     * the retry queue.  Flip to FAILOVER under the lock, but run the
     * failover itself only after dropping it.
     */
    if (device->state == IIO_DEVICE_QUIESCE &&
        device->active_msg_count == device->retry_msg_count) {
        device->state = IIO_DEVICE_FAILOVER;
        do_failover = 1;
    }
    ck_spinlock_unlock(&device->slock);

    if (do_failover) {
        iio_device_failover(device);
    }

    return retry;
}
Example no. 3
0
/*
 * Submit a message on a device.
 *
 * Returns 0 on success — including the case where the message could
 * not be sent but was queued for retry because a failover is in
 * progress — or -1 with errno set to ENXIO when the device has
 * permanently failed.
 */
static int32_t
iio_msg_submit(struct iio_device *device, struct qnio_msg *msg, uint32_t flags)
{
    struct channel *channel;
    int32_t rc;

    nioDbg("iio_msg_submit: msg=%p, usr_ctx=%p, opcode=%d",
           msg, msg->user_ctx, (int)msg->hinfo.opcode);

    ck_spinlock_lock(&device->slock);
    if (device->state == IIO_DEVICE_FAILED) {
        /* Device is gone for good; fail fast. */
        ck_spinlock_unlock(&device->slock);
        msg->hinfo.err = QNIOERROR_NOCONN;
        errno = ENXIO;
        return -1;
    }
    device->active_msg_count ++;
    if (device->state == IIO_DEVICE_FAILOVER ||
        device->state == IIO_DEVICE_QUIESCE) {
        /* Failover under way: park the message for resubmission later. */
        device->retry_msg_count ++;
        LIST_ADD(&device->retryq, &msg->lnode);
        ck_spinlock_unlock(&device->slock);
        return 0;
    }
    ck_spinlock_unlock(&device->slock);

    if (flags & IIO_FLAG_ASYNC) {
        msg->hinfo.flags |= QNIO_FLAG_REQ;
    } else {
        msg->hinfo.flags |= QNIO_FLAG_SYNC_REQ;
    }
    msg->reserved = device;
    channel = device->channel;
    rc = channel->cd->chdrv_msg_send(channel, msg);
    if (rc != 0 && iio_msg_done(msg)) {
        /* Send failed, but the message was queued for retry. */
        rc = 0;
    }
    return rc;
}
Example no. 4
0
/*
 * Failover worker thread for a device.
 *
 * Walks the device's host list looking for a reachable replacement
 * host.  On success the old channel is closed, queued messages are
 * resubmitted on the new channel and the device goes back to ACTIVE.
 * If no host can be reached within FAILOVER_TIMEOUT, the device is
 * marked FAILED and every queued message is completed with
 * QNIOERROR_NOCONN.
 *
 * args is the struct iio_device being failed over; always returns NULL
 * (via pthread_exit).
 */
static void *
iio_device_failover_thread(void *args)
{
    struct iio_device *device = (struct iio_device *)args;
    struct iio_vdisk_hostinfo *hostinfo = device->hostinfo;
    struct iio_vdisk_hostinfo *new_hostinfo;
    struct channel *new_channel;
    struct qnio_msg *msg;
    time_t start_t, end_t;
    double diff_t;

    time(&start_t);
    nioDbg("Starting failover on device %s", device->devid);

read_hostinfo:
    /* Refresh the host list; keep the previous copy if the read fails. */
    new_hostinfo = iio_read_hostinfo(device->devid);
    if (new_hostinfo) {
        free(hostinfo);
        device->hostinfo = new_hostinfo;
        hostinfo = new_hostinfo;
    }
    hostinfo->failover_idx = -1;

retry_nexthost:
    /*
     * Find next host
     */
    hostinfo->failover_idx ++;
    if (hostinfo->failover_idx == hostinfo->nhosts) {
        /* Exhausted the list; re-read it and start over. */
        goto read_hostinfo;
    }

    /*
     * Open channel to the new host
     */
    new_channel = iio_channel_open(hostinfo->hosts[hostinfo->failover_idx],
                                   device->channel->cacert,
                                   device->channel->client_key,
                                   device->channel->client_cert);
    if (new_channel == NULL) {
        time(&end_t);
        diff_t = difftime(end_t, start_t);
        if (diff_t > FAILOVER_TIMEOUT) {
            nioDbg("Failover timedout");
            goto err;
        }
        usleep(FAILOVER_RETRY_WAIT);
        goto retry_nexthost;
    }

    /*
     * Close the old channel.
     */
    device->channel->cd->chdrv_close(device->channel);
    device->channel = new_channel;

    if (!iio_check_failover_ready(device)) {
        goto retry_nexthost;
    }

    /*
     * Restart messages.  The lock is dropped around each resubmit and
     * re-acquired before the next queue check.
     */
    ck_spinlock_lock(&device->slock);
    device->state = IIO_DEVICE_ACTIVE;
    while (!LIST_EMPTY(&device->retryq)) {
        msg = LIST_ENTRY(device->retryq.next, struct qnio_msg, lnode);
        LIST_DEL(&msg->lnode);
        device->retry_msg_count --;
        ck_spinlock_unlock(&device->slock);
        nioDbg("Restarting message, msgid=%ld %p", msg->hinfo.cookie, msg);
        iio_msg_resubmit(device, msg);
        ck_spinlock_lock(&device->slock);
    }
    ck_spinlock_unlock(&device->slock);
    pthread_exit(0);
    return NULL;

err:
    /*
     * Fail all messages.
     */
    ck_spinlock_lock(&device->slock);
    device->state = IIO_DEVICE_FAILED;
    while (!LIST_EMPTY(&device->retryq)) {
        msg = LIST_ENTRY(device->retryq.next, struct qnio_msg, lnode);
        LIST_DEL(&msg->lnode);
        nioDbg("No host found failing message, msgid=%ld %p", msg->hinfo.cookie, msg);
        device->retry_msg_count --;
        ck_spinlock_unlock(&device->slock);
        msg->hinfo.err = QNIOERROR_NOCONN;
        if (msg->hinfo.flags & QNIO_FLAG_SYNC_REQ) {
            ck_pr_store_int(&msg->resp_ready, 1);
        } else {
            client_callback(msg);
        }
        /*
         * BUGFIX: re-acquire the lock before the next LIST_EMPTY check.
         * The previous code dropped the lock inside the loop and never
         * re-took it, so iterations after the first walked and modified
         * the retry queue unlocked, and the final unlock below released
         * a lock that was not held.  This mirrors the restart loop above.
         */
        ck_spinlock_lock(&device->slock);
    }
    ck_spinlock_unlock(&device->slock);
    pthread_exit(0);
    return NULL;
}