int test_scsi_cmds()
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunk = 16;
    pthread_t thread;
    __u64 stride = 0x10;
    __u64 nlba;
    /* SCSI opcodes: TEST UNIT READY, REPORT LUNS, SERVICE ACTION IN(16),
     * INQUIRY, REQUEST SENSE, START STOP UNIT, MODE SENSE(10), MODE SELECT(10) */
    uint8_t opcode[] = { 0x00, 0xA0, 0x9E, 0x12, 0x03, 0x1B, 0x5A, 0x55 };
    int index;
    int i;

    pid = getpid();
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");

    pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);

    p_ctx->flags = DK_UVF_ALL_PATHS;
    p_ctx->lun_size = chunk * p_ctx->chunk_size;
    rc = create_res(p_ctx);
    CHECK_RC(rc, "create_res failed");

    nlba = p_ctx->last_lba+1;
    for (index=0;index <sizeof(opcode);index++)
    {
        debug("%d:sending scsi cmd=0X%"PRIX8" ........\n",pid,opcode[index]);
        fill_send_write(p_ctx, nlba, pid, stride);
        for (i =0;i<NUM_CMDS;i++)
        {
            p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL;
            p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ;
            p_ctx->cmd[i].rcb.cdb[0] = opcode[index];
        }
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
#ifndef _AIX
        if (rc != 0x21)
        {
            fprintf(stderr,"%d:failed rc =%d for scsi cmd=0X%"PRIX8",exptd rc=0x21\n",
                    pid,rc,opcode[index]);
            break;
        }
#endif
        debug("%d:rc =%d for scsi cmd=0X%"PRIX8" ........\n",pid,rc,opcode[index]);
        usleep(1000);
    }
    pthread_cancel(thread);
    ctx_close(p_ctx);
    return rc;
}
Example #2
static int send_block(struct xm_sender *xms)
{
	int n;
	int i;
	uchar *sp;
	uchar *end = xms->bp + XM_BLKSIZ;

	if ( !xms->last || xms->len - (xms->bp - xms->buf) != XM_BLKSIZ )
		xms->ctl[XMS_CMD] = XM_SOH;
	else
		xms->ctl[XMS_CMD] = XM_EOT;

	xms->ctl[XMS_BLK] = xms->nxtblk;
	xms->ctl[XMS_BNEG] = 255 - xms->nxtblk;
	++xms->nxtblk;

	/* calculate checksum */
	for ( i = 0, xms->ctl[XMS_SUM] = 0; i < XM_BLKSIZ; ++i )
		xms->ctl[XMS_SUM] += xms->bp[i];

	i = 0;
	/* try up to 10 times */
	while ( i < 10 ) {
		sp = xms->bp;
		do { 
			n = xms->ops.send(sp, end - sp);
			if ( n < 0 ) {
				xms->error = XME_IO;
				return -1;
			}
			sp += n;
		} while ( sp < end );

		n = wait_resp(xms);
		if ( n == XM_ACK )
			return 0;
		++i;
	}

	xms->error = XME_TOUT;
	return -1;
}
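For reference, send_block() implements the classic XMODEM-checksum framing: SOH, the block number, its complement (255 - blk), 128 data bytes, and an additive 8-bit checksum over those data bytes, retried until the receiver ACKs. The sketch below shows that standard frame layout in a self-contained form; the names, the 0x1A padding convention, and the constant values are standard XMODEM, not taken from the xm_sender library above.

#include <stdint.h>
#include <string.h>

#define XMODEM_SOH     0x01   /* start-of-header for a 128-byte block */
#define XMODEM_BLKSIZ  128    /* payload size of a classic XMODEM block */

/* Build one 132-byte XMODEM frame: SOH, blk#, 255-blk#, 128 data bytes, checksum.
 * 'frame' must have room for XMODEM_BLKSIZ + 4 bytes. Returns the frame length. */
static size_t xmodem_build_frame(uint8_t *frame, uint8_t blk,
                                 const uint8_t *data, size_t len)
{
    uint8_t sum = 0;
    size_t i;

    frame[0] = XMODEM_SOH;
    frame[1] = blk;                  /* block number, starts at 1 and wraps */
    frame[2] = (uint8_t)(255 - blk); /* complement guards against corruption */

    /* copy the payload, padding a short final block with 0x1A (CPMEOF) */
    memset(&frame[3], 0x1A, XMODEM_BLKSIZ);
    memcpy(&frame[3], data, len < XMODEM_BLKSIZ ? len : XMODEM_BLKSIZ);

    /* additive 8-bit checksum over the 128 data bytes only */
    for (i = 0; i < XMODEM_BLKSIZ; i++)
        sum = (uint8_t)(sum + frame[3 + i]);
    frame[3 + XMODEM_BLKSIZ] = sum;

    return XMODEM_BLKSIZ + 4;
}

In checksum mode the receiver answers each block with ACK (0x06) or NAK (0x15), which is what the wait_resp()/retry loop in send_block() checks, giving up after ten attempts.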
Example #3
int xms_send(struct xm_sender *xms)
{
	int rv;

	if ( xms->first ) {
		rv = wait_resp(xms);
		if ( rv != XM_NAK ) {
			if ( rv >= 0 )
				xms->error = XME_PROTO;
			return -1;
		}
		xms->first = 0;
	}

	while ( xms->bp - xms->buf < xms->len ) {
		rv = send_block(xms);
		if ( rv < 0 )
			return rv;
		xms->bp += XM_BLKSIZ;
	}

	return 0;
}
int test_ctx_reset()
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx= &myctx;
    pthread_t thread;
    __u64 buf_size = 0x2000000; //32MB 
    __u64 chunk = 10;
    __u64 stride = 0x1000;
    struct rwlargebuf rwbuf;
    int i;

    pid=getpid();
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "ctx_init failed");
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);

    rc = create_resource(p_ctx,chunk*p_ctx->chunk_size,DK_UVF_ASSIGN_PATH,LUN_VIRTUAL);
    CHECK_RC(rc, "create LUN_VIRTUAL failed");

    //do bad EA
    if (1)
    {
        debug("%d: ........place bad EA....\n", pid);
        fill_send_write(p_ctx, 0, pid, stride);
        for (i = 0; i < NUM_CMDS; i++)
        {
            p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234;
        }
        bad_address = true;
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
        sleep(1);
        //normal IO
        bad_address = false;
        debug("%d: .........after bad EA, do normal IO....\n", pid);
        rc = do_io(p_ctx, stride);
        CHECK_RC(rc,"Normal IO failed after bad EA");

        //do bad RCB
        debug("%d: .........place bad RCB....\n", pid);
        bad_address = true;
        place_bad_addresses(p_ctx, 1);
        sleep(2);
        //normal IO
        debug("%d: ......after bad RCB, do normal IO....\n", pid);
        bad_address = false;
        rc = do_io(p_ctx, stride);
        CHECK_RC(rc,"Normal IO failed after bad RCB");
#ifdef _AIX
        rc = setRUnlimited();
        CHECK_RC(rc,"setRUnlimited() failed"); 
#endif
    }
    //do large transfer
    debug("%d: Do large transfer ....\n", pid);
    rc = allocate_buf(&rwbuf, buf_size);
    CHECK_RC(rc, "memory allocation failed");
    rc = do_large_io(p_ctx, &rwbuf, buf_size);
    deallocate_buf(&rwbuf);
    buf_size = 0x100000; //1MB
    rc = allocate_buf(&rwbuf, buf_size);
    CHECK_RC(rc, "memory allocation failed");
    //normal io
    debug("%d: after large transfer,do normal IO ....\n", pid);
    rc = do_io(p_ctx, 0x10000);
    //rc = do_large_io(p_ctx, &rwbuf, buf_size);
    CHECK_RC(rc,"Normal IO failed after large transfer");

    pthread_cancel(thread);
    close_res(p_ctx);
    ctx_close(p_ctx);
    return rc;
}
/*
 * Create two processes, each with its own context and resource handles.
 * The parent then issues I/O using the child's context handle, which it
 * obtains through a pipe. A stripped-down sketch of that pipe handshake
 * follows this function.
 */
int mc_test_inter_prcs_ctx_int(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    res_hndl_t res_hndl;
    ctx_hndl_t ctx_hndl;
    int pdes[2];
    pid_t cpid;
    pthread_t thread;
    __u64 stride = 0x1000;
    int i;
    //create a pipe: the child opens it for write, the parent for read

    pipe(pdes);
    cpid = fork();
    if ( 0 == cpid)
    {
        //child one running
        pid = getpid();
        debug("%d: child do init_mc \n", pid);
        rc = init_mc(p_ctx, &res_hndl);
        if (rc)
        {
            fprintf(stderr, "%d: exiting due to init_mc\n:", pid);
            exit(rc);
        }
        //write the ctx handle into the pipe & spin until the parent kills us
        close(pdes[0]); //close read des
        write(pdes[1], &p_ctx->ctx_hndl, sizeof(ctx_hndl_t));
        while (1);
    }
    else
    {
        //parent
        close(pdes[1]); //close write des
        //give the child time to do its work; it then waits for us
        sleep(1);
        pid = getpid();
        rc = init_mc(p_ctx, &res_hndl);
        if (rc)
        {
            kill(cpid, SIGKILL);
            return rc;
        }
        pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
        read(pdes[0], &ctx_hndl, sizeof(ctx_hndl_t));
        fill_send_write(p_ctx, 0, pid, stride);
        //use the other process's ctx handle
        debug("%d: use child(%d) process ctx hndl: %d\n", pid, cpid, ctx_hndl);
        for (i = 0; i< NUM_CMDS; i++)
        {
            p_ctx->cmd[i].rcb.ctx_id = ctx_hndl;
        }
        if (2 == cmd)
        {
            //second variant: close one of our own res handles and keep using
            //its number here (the child has opened 2 res handles)
            p_ctx->res_hndl = res_hndl;
            close_res(p_ctx);
            debug("%d: close res_hndl(%d) but child (%d)has opened\n",
                  pid, res_hndl, cpid);
            for (i = 0; i< NUM_CMDS; i++)
            {
                p_ctx->cmd[i].rcb.res_hndl   = res_hndl;
            }
        }
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
        kill(cpid, SIGKILL);
        pthread_cancel(thread);
    }
    return rc;
}
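The parent/child handshake above can be reduced to the following self-contained sketch. The int handle, the hard-coded value 42, and main() are illustrative stand-ins for the CXL-flash context handle and the test harness; only the pipe/fork/read/write/kill pattern mirrors the function above.

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Minimal parent/child handle exchange over a pipe: the child publishes a
 * handle and spins, the parent reads it, uses it, then kills and reaps
 * the child. */
int main(void)
{
    int pdes[2];
    pid_t cpid;

    if (pipe(pdes) != 0) {
        perror("pipe");
        return 1;
    }

    cpid = fork();
    if (cpid == 0) {
        int handle = 42;            /* stands in for the child's ctx handle */
        close(pdes[0]);             /* child: close the read end */
        write(pdes[1], &handle, sizeof(handle));
        for (;;)
            pause();                /* wait until the parent kills us */
    } else {
        int handle = -1;
        close(pdes[1]);             /* parent: close the write end */
        read(pdes[0], &handle, sizeof(handle));
        printf("parent %d got child handle %d\n", (int)getpid(), handle);
        /* ... issue I/O with the borrowed handle here ... */
        kill(cpid, SIGKILL);
        waitpid(cpid, NULL, 0);     /* reap the child */
    }
    return 0;
}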
int mc_invalid_ioarcb(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunks=32;
    __u64 actual_size=0;
    __u64 vlba =0;
    __u32 *p_u32;
    __u64 stride;
    __u64 *p_u64;
    pthread_t thread;
    mc_stat_t l_mc_stat;
    int i;

    pid = getpid();

    signal(SIGABRT, sig_handle);
    signal(SIGSEGV, sig_handle);
    rc = mc_init();
    CHECK_RC(rc, "mc_init failed");
    debug("mc_init success :%d\n",rc);

    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");

    pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);

    if (15 == cmd)
    {
        //PLBA out of range
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "opening res_hndl");
        actual_size = (p_ctx->last_lba+1)/p_ctx->chunk_size;
    }
    else
    {
        p_ctx->flags = DK_UVF_ALL_PATHS;
        rc = create_res(p_ctx);
        CHECK_RC(rc, "opening res_hndl");
        rc = mc_size1(p_ctx,chunks, &actual_size);
        CHECK_RC(rc, "mc_size");
    }

    rc = mc_stat1(p_ctx, &l_mc_stat);
    CHECK_RC(rc, "mc_stat");
    stride  = 1 << l_mc_stat.nmask;

    vlba = (actual_size * (1 << l_mc_stat.nmask))-1;
    fill_send_write(p_ctx, vlba, pid, stride);
    for (i = 0; i < NUM_CMDS; i++)
    {
        if (1 == cmd)
        {
            //invalid opcode
            debug("invalid opcode(0xFA) action = %d\n",cmd);
            p_ctx->cmd[i].rcb.cdb[0] = 0xFA;
        }
        else if (2 == cmd)
        {
            //EA = NULL
            debug("EA = NULL action = %d\n",cmd);
            p_ctx->cmd[i].rcb.data_ea = (__u64)NULL;
#ifdef _AIX
            bad_address = true;
#endif
        }
        else if (3 == cmd)
        {
            //invalid flags
            p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL;
            p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ;
            debug("invalid flag = 0X%X\n",p_ctx->cmd[i].rcb.req_flags);
        }
        else if (5 == cmd)
        {
            //SISL_AFU_RC_RHT_INVALID
            p_ctx->cmd[i].rcb.res_hndl   = p_ctx->res_hndl + 2;
        }
        else if ( 6 == cmd)
        {
            //SISL_AFU_RC_RHT_OUT_OF_BOUNDS
            p_ctx->cmd[i].rcb.res_hndl   = MAX_RES_HANDLE;
        }
        else if (7 == cmd)
        {
            //invalid address for page fault
            debug("setting EA = 0x1234 to generate error page fault\n");
            p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234;
#ifdef _AIX
            bad_address = true;
#endif
        }
        else if (8 == cmd)
        {
            //invalid ctx_id
            debug("%d:  sending invalid ctx id\n", pid);
            p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl +10;
        }
        else if (9 == cmd)
        {
            //test flag underrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0])/2;
        }
        else if (10 == cmd)
        {
            // test flag overrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]);
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, 2);
        }
        else if (11 == cmd)
        {
            //rc scsi_rc_check
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, p_ctx->blk_len +1);
        }
        else if (12 == cmd)
        {
            //data len 0 in ioarcb
            p_ctx->cmd[i].rcb.data_len = 0;
        }
        else if (13 == cmd)
        {
            //number of blocks to write = 0
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, 0);
        }
        else if ((14 == cmd) || (15 == cmd))
        {
            //test out of range LBAs
            p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2];
            vlba += i+1;
            write_lba(p_u64, vlba);
        }
    }

    //test BAD IOARCB, IOASA & CMD room violation
    if (cmd >= 100)
    {
        if (100 == cmd)
        {
            //bad RCB
            place_bad_addresses(p_ctx, 1);
            usleep(1000);
            if (err_afu_intrpt) //the expected result: AFU error interrupt seen
                rc = 100;
            else rc = -1;
            goto END;
        }
        else if (101 == cmd)
        {
            //bad IOASA
            handle_bad_ioasa(p_ctx, pid);
            usleep(1000); //give the AFU time to process the RCB
            //and let the RRQ thread handle the event;
            //the RRQ thread is expected to report the error
            return -1;
        }
        else if (102 == cmd)
        {
            //cmd_room violation
            place_bad_addresses(p_ctx, 3);
            usleep(1000);
#ifdef _AIX
            if (err_afu_intrpt) //the expected result: AFU error interrupt seen
                rc = 102;
            else rc = -1;
            goto END;
#endif
        }
        else if (103 == cmd)
        {
            //bad HRRQ
            place_bad_addresses(p_ctx, 2);
            usleep(1000);
            if (err_afu_intrpt) //the expected result: AFU error interrupt seen
                rc = 103;
            else rc = -1;
            goto END;
        }
    }
    else
    {
        send_cmd(p_ctx);
    }
    rc = wait_resp(p_ctx);
    if ( cmd >= 9 && cmd <= 13)
    {
        if (!rc_flags)
        {
            if (!dont_displa_err_msg)
                fprintf(stderr, "%d: Expecting rc flags non zero\n", pid);
            rc = -1;
        }
    }
    if (4 == cmd)
    {
        //invalid fc port & lun id
        debug("invalid fc port(0xFF)&lun id(0X1200), action=%d",cmd);
        fill_send_write(p_ctx, vlba, pid, stride);
        for (i = 0; i < NUM_CMDS; i++)
        {
            p_ctx->cmd[i].rcb.lun_id = 0x12000;
            p_ctx->cmd[i].rcb.port_sel = 0xff;
        }
        //send_single_cmd(p_ctx);
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
    }
#ifdef _AIX
    if ((7 == cmd || 2 == cmd)&& (err_afu_intrpt))
        rc = 7;
#endif
END:
    pthread_cancel(thread);
    close_res(p_ctx);
    //mc_unregister(p_ctx->mc_hndl);
    //xerror:
    ctx_close(p_ctx);
    mc_term();
    return rc;
}
Example #7
int mc_invalid_ioarcb(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunks=16;
    __u64 actual_size=0;
    __u64 vlba =0;
    __u32 *p_u32;
    __u64 stride;
    pthread_t thread;
    mc_stat_t l_mc_stat;
    int i;

    rc = mc_init();
    CHECK_RC(rc, "mc_init failed");
    debug("mc_init success :%d\n",rc);

    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");

    pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
    rc = mc_register(master_dev_path, p_ctx->ctx_hndl,
        (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl);
    CHECK_RC(rc, "ctx reg failed");

    rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl);
    CHECK_RC(rc, "opening res_hndl");

    rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size);
    CHECK_RC(rc, "mc_size");

    rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat);
    CHECK_RC(rc, "mc_stat");
    stride = 1 << l_mc_stat.nmask;

    pid = getpid();
    vlba = (actual_size * (1 << l_mc_stat.nmask))-1;
    fill_send_write(p_ctx, vlba, pid, stride, VLBA);
    for (i = 0; i < NUM_CMDS; i++) {
        if (1 == cmd) { //invalid opcode
            debug("invalid opcode(0xFA) action = %d\n",cmd);
            p_ctx->cmd[i].rcb.cdb[0] = 0xFA;
        } else if (2 == cmd) { //EA = NULL
            debug("EA = NULL action = %d\n",cmd);
            p_ctx->cmd[i].rcb.data_ea = (__u64)NULL;
        } else if (3 == cmd) { //invalid flags
            p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL;
            p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ;
            debug("invalid flag = 0X%X\n",p_ctx->cmd[i].rcb.req_flags);
        } else if (5 == cmd) { //SISL_AFU_RC_RHT_INVALID
            p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl + 2;
        } else if (6 == cmd) { //SISL_AFU_RC_RHT_OUT_OF_BOUNDS
            p_ctx->cmd[i].rcb.res_hndl = MAX_RES_HANDLE;
        } else if (7 == cmd) { //invalid address for page fault
            debug("setting EA = 0x1234 to generate error page fault\n");
            p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234;
        } else if (8 == cmd) { //invalid ctx_id
            debug("%d: sending invalid ctx id\n", pid);
            p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl + 10;
        } else if (9 == cmd) { //test flag underrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0])/2;
        } else if (10 == cmd) { //test flag overrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]) + 2;
        } else if (11 == cmd) { //rc scsi_rc_check
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, LBA_BLK + 1);
        } else if (12 == cmd) { //data len 0 in ioarcb
            p_ctx->cmd[i].rcb.data_len = 0;
        } else if (13 == cmd) { //number of blocks to write = 0
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, 0);
        }
    }
    //send_single_cmd(p_ctx);
    send_cmd(p_ctx);
    //rc = wait_single_resp(p_ctx);
    rc = wait_resp(p_ctx);
    if (cmd >= 9 && cmd <= 13) {
        if (!rc_flags) {
            if (!dont_displa_err_msg)
                fprintf(stderr, "%d: Expecting rc flags non zero\n", pid);
            rc = -1;
        }
    }
    if (4 == cmd) { //invalid fc port & lun id
        debug("invalid fc port(0xFF) & lun id(0x12000), action=%d\n",cmd);
        fill_send_write(p_ctx, vlba, pid, stride, PLBA);
        for (i = 0; i < NUM_CMDS; i++) {
            p_ctx->cmd[i].rcb.lun_id = 0x12000;
            p_ctx->cmd[i].rcb.port_sel = 0xff;
        }
        //send_single_cmd(p_ctx);
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
    }
    pthread_cancel(thread);
    mc_close(p_ctx->mc_hndl,p_ctx->res_hndl);
    mc_unregister(p_ctx->mc_hndl);
    ctx_close(p_ctx);
    mc_term();
    return rc;
}