int test_spio_plun() { int rc; struct ctx myctx; struct ctx *p_ctx = &myctx; pthread_t thread; __u64 stride= 0x10000; pid = getpid(); rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); //thread to handle AFU interrupt & events pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); //for PLUN 2nd argument(lba_size) would be ignored rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); CHECK_RC(rc, "failed compare_size"); rc = do_io(p_ctx, stride); pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
// 7.1.219 : Pass context token to different process & do detach/release. int test_detach_diff_proc() { int rc=0; int cstat; struct ctx myctx; struct ctx *p_ctx = &myctx; pid = getpid(); //ctx_init with default flash disk & devno rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); rc = fork(); if ( rc == -1 ) CHECK_RC(1, "fork() failed"); // child process if ( rc == 0 ) { pid = getpid(); rc = ctx_close(p_ctx); if ( 22 != rc ) CHECK_RC_EXIT(1, "Context detach did not fail"); exit(0); } else { // Probe child's exit status. if ( wait(&cstat) == -1 ) CHECK_RC(1, "Failed while wait() for child"); // We expect child to exit itself if (WIFEXITED(cstat)) { // We expect child to exit with rc 0 only ! if ( WEXITSTATUS(cstat) != 0 ) rc=1; else rc=0; } } rc |= ctx_close(p_ctx); return rc; }
// Test Case Starts here .......... !! void cleanup(struct ctx *p_ctx, pthread_t threadId) { debug("\n\n%d:**************** Start cleanup ****************\n",pid); // Useful for some -ve tests. NOOPs if thId is passed as -1. if ( -1 != threadId ) pthread_cancel(threadId); close_res(p_ctx); ctx_close(p_ctx); debug("%d:****************** End cleanup ******************\n",pid); }
/*
 * Fork MAX_OPENS child processes, each opening its own context on the
 * flash device; cmd selects what each child does on top of the attach:
 *   1 - attach only (no resource)
 *   2 - also create a virtual LUN
 *   3 - also create a direct LUN
 *   4 - create a vlun and drive I/O through it
 * Each child exits with its accumulated rc; the parent returns the
 * aggregate from wait4all().
 */
int max_ctx_on_plun(int cmd)
{
    int i;
    int rc = 0;
    struct ctx myctx;
    struct ctx *p_ctx=&myctx;
    pid = getpid();
    pthread_t thread;
    int max_p = MAX_OPENS;
    for (i=0; i<max_p;i++)
    {
        if (0==fork())
        {
            //child process
            pid = getpid();
            debug("%d: ......process %d created...\n",pid,i);
            memset(p_ctx, 0, sizeof(myctx));
            strcpy(p_ctx->dev, cflash_path);
            if ((p_ctx->fd = open_dev(p_ctx->dev, O_RDWR)) < 0)
            {
                fprintf(stderr,"open failed %s, errno %d\n",cflash_path, errno);
                exit(rc);  /* rc is still 0 here; exits "success" on open failure —
                            * NOTE(review): confirm that is intended */
            }
#ifdef _AIX
            rc |= ioctl_dk_capi_query_path(p_ctx);
            rc|=ctx_init_internal(p_ctx, 0, p_ctx->devno);
#else
            rc|=ctx_init_internal(p_ctx, 0x2, p_ctx->devno);
#endif
            if (2 == cmd)
                rc |=create_resource(p_ctx,0,0,LUN_VIRTUAL);
            if (3 == cmd)
                rc |=create_resource(p_ctx,0,0,LUN_DIRECT);
            if (4 == cmd)
            {
                //do io all vluns created on path_id_mask
                pthread_create(&thread, NULL,ctx_rrq_rx,p_ctx);
                rc |= create_resource(p_ctx,p_ctx->chunk_size,0,LUN_VIRTUAL);
                rc |= do_io(p_ctx,0x10);
                pthread_cancel(thread);
            }
            sleep(10); //lets all context get created
            if ( 1 != cmd )
                rc|=close_res(p_ctx);
            rc|=ctx_close(p_ctx);
            debug("%d:.exiting with rc=%d\n",pid,rc);
            exit(rc);
        }
    }
    /* Parent: reap every child and fold their exit codes together. */
    rc=wait4all();
    return rc;
}
/*
 * Spawn a detached-use worker running thread_callback(ctx).  If thread
 * creation fails, report the error and release the context so it does
 * not leak.
 */
void thread_create(Ctx *ctx)
{
    pthread_t tid;
    int ret;

    ret = pthread_create(&tid, NULL, thread_callback, (void *)ctx);
    if (ret != 0)
    {
        printf("call pthread_create errno:%d\n", ret);
        /* clean up resources */
        ctx_close(ctx);
    }
}
/*
 * Push a set of raw SCSI opcodes through the AFU against a vlun and
 * check each completion code.  On Linux every opcode is expected to
 * complete with rc 0x21; any other rc aborts the loop.  On AIX the rc
 * is only logged.
 *
 * NOTE(review): when every opcode completes "as expected", the final
 * wait_resp() rc (0x21 on Linux) is what gets returned — confirm the
 * caller treats that non-zero value as a pass, or it should be
 * normalized to 0 here.
 */
int test_scsi_cmds()
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunk = 16;
    pthread_t thread;
    __u64 stride = 0x10;
    __u64 nlba;
    /* Opcodes under test (standard SCSI: 0x00 TEST UNIT READY, 0x12
     * INQUIRY, 0x03 REQUEST SENSE, 0x1B START/STOP, 0x5A MODE SENSE(10),
     * 0x55 MODE SELECT(10), 0xA0 REPORT LUNS, 0x09E SERVICE ACTION IN). */
    uint8_t opcode[]={ 0x00,0xA0,0x09E,0x12,0x03,0x1B,0x5A,0x55 };
    int index;
    pid = getpid();
    rc = ctx_init(p_ctx);
    int i;
    CHECK_RC(rc, "Context init failed");
    pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
    p_ctx->flags = DK_UVF_ALL_PATHS;
    p_ctx->lun_size = chunk * p_ctx->chunk_size;
    rc = create_res(p_ctx);
    CHECK_RC(rc, "create_res failed");
    /* One past the last valid LBA — the writes below are built there. */
    nlba = p_ctx->last_lba+1;
    for (index=0;index <sizeof(opcode);index++)
    {
        debug("%d:sending scsi cmd=0X%"PRIX8" ........\n",pid,opcode[index]);
        /* Build NUM_CMDS write RCBs, then patch flags + opcode so the AFU
         * sees each command as a host read carrying the raw opcode. */
        fill_send_write(p_ctx, nlba, pid, stride);
        for (i =0;i<NUM_CMDS;i++)
        {
            p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL;
            p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ;
            p_ctx->cmd[i].rcb.cdb[0] = opcode[index];
        }
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
#ifndef _AIX
        if (rc != 0x21)
        {
            fprintf(stderr,"%d:failed rc =%d for scsi cmd=0X%"PRIX8",exptd rc=0x21\n", pid,rc,opcode[index]);
            break;
        }
#endif
        debug("%d:rc =%d for scsi cmd=0X%"PRIX8" ........\n",pid,rc,opcode[index]);
        usleep(1000);
    }
    pthread_cancel(thread);
    ctx_close(p_ctx);
    return rc;
}
int test_large_transfer() { int rc; struct ctx my_ctx; struct ctx *p_ctx = &my_ctx; pthread_t thread; struct rwlargebuf rwbuf; __u64 chunk=2; // do io on last 2 chunks on a plun __u64 buf_size[] = { 0x1000, //4KB 0x4000, //16KB 0x10000, //64KB 0x40000, //256KB 0x800000, //8MB 0x1000000 }; //16MB int i; //Large trasnfer size is for PLUN not Vluns(4K only) as per Jim pid = getpid(); #ifdef _AIX rc = setRUnlimited(); CHECK_RC(rc, "setRUnlimited failed"); #endif rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); rc = create_resource(p_ctx,0,DK_UDF_ASSIGN_PATH,LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); p_ctx->st_lba= p_ctx->last_lba +1 -(chunk*p_ctx->chunk_size); if (long_run_enable) p_ctx->st_lba=0; //let do IO on complete plun for (i=0;i< sizeof(buf_size)/sizeof(__u64);i++) { rc = allocate_buf(&rwbuf, buf_size[i]); CHECK_RC(rc, "memory allocation failed"); printf("%d: do large io size=0X%"PRIX64"\n",pid, buf_size[i]); rc = do_large_io(p_ctx, &rwbuf, buf_size[i]); deallocate_buf(&rwbuf); if (rc) break; //get out from here } pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
/*
 * Attach/detach churn test: repeatedly attach a context on dev, create
 * a LUN (virtual LUNs are created small and then resized, direct LUNs
 * as-is) and tear it down again.  Loop count is 20, or 100000 when the
 * LONG_RUN environment variable is set.  Returns 0.
 */
int do_attach_detach(char *dev, dev64_t devno, __u16 lun_type)
{
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunk = 20;
    __u64 nlba;
    int loops = 20;
    int rc;
    char *str = getenv("LONG_RUN");

    if (str != NULL)
    {
        loops = 100000;
        printf("LONG_RUN enabled...loop=%d\n",loops);
        fflush(stdout);
    }
    pid = getpid();

    while (loops-- > 0)
    {
        rc = ctx_init2(p_ctx, dev, DK_AF_ASSIGN_AFU, devno);
        CHECK_RC(rc, "Context init failed");

        if (LUN_VIRTUAL == lun_type)
        {
            //create 0 vlun size & later call resize ioctl
            chunk = rand() % 16;
            rc = create_resource(p_ctx, chunk, DK_UVF_ALL_PATHS, lun_type);
            CHECK_RC(rc, "create LUN_VIRTUAL failed");
            chunk = rand() % 32;
            nlba = chunk * p_ctx->chunk_size;
            rc = vlun_resize(p_ctx, nlba);
            CHECK_RC(rc, "vlun_resize failed");
        }
        else
        {
            rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, lun_type);
            CHECK_RC(rc, "create LUN_DIRECT failed");
        }

        close_res(p_ctx);
        ctx_close(p_ctx);

        /* Progress marker every 500 iterations; flush every pass. */
        if (loops % 500 == 0)
            printf("%d: loop remains....\n",loops);
        fflush(stdout);
    }
    return 0;
}
int child_mc_size_error(int cmd) { int rc; struct ctx myctx; struct ctx *p_ctx = &myctx; __u64 size=0; int invalid=0; pid = getpid(); rc =mc_init(); CHECK_RC(rc, "mc_init failed"); rc = ctx_init(p_ctx); CHECK_RC(rc, "ctx init failed"); rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl); CHECK_RC(rc, "ctx reg failed"); rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl); CHECK_RC(rc, "opening res_hndl"); if(1 == cmd) { //invalid MCH rc = mc_size((mc_hndl_t)&invalid, p_ctx->res_hndl,1,&size); rc = rc ? 1:0; } else if( 2 == cmd) { //invalid RSH rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl+20,1,&size); rc = rc ? 2:0; } else if(3 == cmd) { //NULL size rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,1, NULL); rc = rc ? 3:0; } else if(4 == cmd) { //after mc_close mc_close(p_ctx->mc_hndl, p_ctx->res_hndl); rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,1, &size); rc = rc ? 4:0; } else if(5 == cmd) { //after mc_unregister mc_unregister(p_ctx->mc_hndl); rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,1, &size); rc = rc ? 5:0; } ctx_close(p_ctx); mc_term(); return rc; }
int test_large_trnsfr_boundary() { int rc; struct ctx my_ctx; struct ctx *p_ctx = &my_ctx; pthread_t thread; struct rwlargebuf rwbuf; __u64 buf_size = 0x1000000; //16MB __u64 chunk = 10; pid = getpid(); #ifdef _AIX system("ulimit -d unlimited"); system("ulimit -s unlimited"); system("ulimit -m unlimited"); #endif rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); //do RW last cmd with crossed LBA boundary //i.e. last_lba size is 0x100; //do send rw with 0x10 & cross limit of 0x100 rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); rc = allocate_buf(&rwbuf, buf_size); CHECK_RC(rc, "memory allocation failed"); //to make sure last cmd rw beyond boundary p_ctx->st_lba = p_ctx->last_lba - (chunk * p_ctx->chunk_size); p_ctx->st_lba = p_ctx->st_lba +20 ; rc = do_large_io(p_ctx, &rwbuf, buf_size); deallocate_buf(&rwbuf); pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
/*
 * Create the maximum number of virtual LUNs on one context, then close
 * every resource and the context.  Returns 0 on success.
 */
int max_vlun_on_a_ctx()
{
    int i;
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 vluns[MAX_VLUNS];   /* resource handle of each created vlun */

    pid = getpid();
    rc = ctx_init(p_ctx);
    /* BUG FIX: the ctx_init() return code was previously ignored; a
     * failed attach would have cascaded into every call below. */
    CHECK_RC(rc, "Context init failed");

    for (i = 0; i < MAX_VLUNS; i++)
    {
        rc = create_resource(p_ctx, p_ctx->chunk_size, 0, LUN_VIRTUAL);
        CHECK_RC(rc, "create_resource Failed\n");
        vluns[i] = p_ctx->rsrc_handle;
    }
    for (i = 0; i < MAX_VLUNS; i++)
    {
        p_ctx->rsrc_handle = vluns[i];
        rc = close_res(p_ctx);
        CHECK_RC(rc, "close_res failed\n");
    }
    rc = ctx_close(p_ctx);
    return rc;
}
/*
 * Negative-path IOARCB tests: build valid write commands, then corrupt
 * one field per `cmd` (bad opcode, NULL/garbage EA, bad flags, bad
 * handles, bad lengths, out-of-range LBAs) and verify the AFU rejects
 * them.  cmds >= 100 exercise bad RCB/IOASA/HRRQ placement and command
 * room violations instead.  Returns the expected cmd-specific value on
 * the anticipated failure, or an error rc.
 */
int mc_invalid_ioarcb(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunks=32;
    __u64 actual_size=0;
    __u64 vlba =0;
    __u32 *p_u32;
    __u64 stride;
    __u64 *p_u64;
    pthread_t thread;
    mc_stat_t l_mc_stat;
    int i;
    pid = getpid();
    /* Some of the corruptions below can fault; trap to keep the test alive. */
    signal(SIGABRT, sig_handle);
    signal(SIGSEGV, sig_handle);
    rc = mc_init();
    CHECK_RC(rc, "mc_init failed");
    debug("mc_init success :%d\n",rc);
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
    if (15 == cmd)
    {
        //PLBA out of range
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "opening res_hndl");
        actual_size = (p_ctx->last_lba+1)/p_ctx->chunk_size;
    }
    else
    {
        p_ctx->flags = DK_UVF_ALL_PATHS;
        rc = create_res(p_ctx);
        CHECK_RC(rc, "opening res_hndl");
        rc = mc_size1(p_ctx,chunks, &actual_size);
        CHECK_RC(rc, "mc_size");
    }
    rc = mc_stat1(p_ctx, &l_mc_stat);
    CHECK_RC(rc, "mc_stat");
    stride = 1 << l_mc_stat.nmask;
    /* Last valid LBA of the resource; several cases push past it. */
    vlba = (actual_size * (1 << l_mc_stat.nmask))-1;
    fill_send_write(p_ctx, vlba, pid, stride);
    /* Corrupt every queued command according to cmd. */
    for (i = 0; i < NUM_CMDS; i++)
    {
        if (1 == cmd)
        {
            //invalid opcode
            debug("invalid upcode(0xFA) action = %d\n",cmd);
            p_ctx->cmd[i].rcb.cdb[0] = 0xFA;
        }
        else if (2 == cmd)
        {
            //EA = NULL
            debug("EA = NULL action = %d\n",cmd);
            p_ctx->cmd[i].rcb.data_ea = (__u64)NULL;
#ifdef _AIX
            bad_address = true;
#endif
        }
        else if (3 == cmd)
        {
            //invalid flags
            p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL;
            p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ;
            debug("invalid flag = 0X%X\n",p_ctx->cmd[i].rcb.req_flags);
        }
        else if (5 == cmd)
        {
            //SISL_AFU_RC_RHT_INVALID
            p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl + 2;
        }
        else if ( 6 == cmd)
        {
            //SISL_AFU_RC_RHT_OUT_OF_BOUNDS
            p_ctx->cmd[i].rcb.res_hndl = MAX_RES_HANDLE;
        }
        else if (7 == cmd)
        {
            //invalid address for page fault
            debug("setting EA = 0x1234 to generate error page fault\n");
            p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234;
#ifdef _AIX
            bad_address = true;
#endif
        }
        else if (8 == cmd)
        {
            //invalid ctx_id
            debug("%d: sending invalid ctx id\n", pid);
            p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl +10;
        }
        else if (9 == cmd)
        {
            //test flag underrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0])/2;
        }
        else if (10 == cmd)
        {
            // test flag overrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]);
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, 2);
        }
        else if (11 == cmd)
        {
            //rc scsi_rc_check
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, p_ctx->blk_len +1);
        }
        else if (12 == cmd)
        {
            //data len 0 in ioarcb
            p_ctx->cmd[i].rcb.data_len = 0;
        }
        else if (13 == cmd)
        {
            //NUM BLK to write 0
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, 0);
        }
        else if ((14 == cmd) || (15 == cmd))
        {
            //test out of range LBAs
            p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2];
            vlba += i+1;
            write_lba(p_u64, vlba);
        }
    }
    //test BAD IOARCB, IOASA & CMD room violation
    if (cmd >= 100)
    {
        if (100 == cmd)
        {
            //bad RCB
            place_bad_addresses(p_ctx, 1);
            usleep(1000);
            if (err_afu_intrpt) //cool expected res
                rc = 100;
            else rc = -1;
            goto END;
        }
        else if (101 == cmd)
        {
            //bad IOASA
            handle_bad_ioasa(p_ctx, pid);
            usleep(1000); //sleep sometime to process rcb cmd by AFU
            //And let handle rrq event
            //how to handle error, rrq thread should throw some error
            return -1;
        }
        else if (102 == cmd)
        {
            //cmd_room violation
            place_bad_addresses(p_ctx, 3);
            usleep(1000);
#ifdef _AIX
            if (err_afu_intrpt) //cool expected res
                rc = 102;
            else rc = -1;
            goto END;
#endif
        }
        else if (103 == cmd)
        {
            //bad HRRQ
            place_bad_addresses(p_ctx, 2);
            usleep(1000);
            if (err_afu_intrpt) //cool expected res
                rc = 103;
            else rc = -1;
            goto END;
        }
    }
    else
    {
        send_cmd(p_ctx);
    }
    rc = wait_resp(p_ctx);
    /* Underrun/overrun/len cases must surface non-zero rc flags. */
    if ( cmd >= 9 && cmd <= 13)
    {
        if (!rc_flags)
        {
            if (!dont_displa_err_msg)
                fprintf(stderr, "%d: Expecting rc flags non zero\n", pid);
            rc = -1;
        }
    }
    if (4 == cmd)
    {
        //invalid fc port & lun id
        debug("invalid fc port(0xFF)&lun id(0X1200), action=%d",cmd);
        fill_send_write(p_ctx, vlba, pid, stride);
        for (i = 0; i < NUM_CMDS; i++)
        {
            p_ctx->cmd[i].rcb.lun_id = 0x12000;
            p_ctx->cmd[i].rcb.port_sel = 0xff;
        }
        //send_single_cmd(p_ctx);
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
    }
#ifdef _AIX
    if ((7 == cmd || 2 == cmd)&& (err_afu_intrpt))
        rc = 7;
#endif
END:
    pthread_cancel(thread);
    close_res(p_ctx);
    //mc_unregister(p_ctx->mc_hndl);
    //xerror:
    ctx_close(p_ctx);
    mc_term();
    return rc;
}
/*
 * Negative tests for mc_register(): per `cmd`, register with a NULL
 * MMIO map, NULL/wrong/invalid device path, unassigned context handle,
 * bad mmap address, double-register (same process, two processes), or
 * register after the context has been closed.  Returns the cmd number
 * when the expected failure is observed, 0 otherwise, -1 on setup
 * failure.
 */
int child_mc_reg_error(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 *map=(__u64 *)0xabcdf;   /* deliberately bogus mmap address for cmd 6 */
    __u64 actual_size=0;
    __u64 stride;
    __u64 st_lba =0;
    __u64 nlba;
    mc_hndl_t new_mc_hndl, dup_mc_hndl;
    int rc1, rc2, rc3, rc4, rc5;
    pthread_t thread;
    mc_stat_t l_mc_stat;
    __u64 size = 128;
    if(mc_init() !=0 )
    {
        fprintf(stderr, "mc_init failed.\n");
        return -1;
    }
    debug("mc_init success.\n");
    rc = ctx_init(p_ctx);
    if(rc != 0)
    {
        fprintf(stderr, "Context init failed, errno %d\n", errno);
        return -1;
    }
    pid = getpid();
    if(1 == cmd) //mc_reg with NULL MMIOP
    {
        pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
        rc = mc_register(master_dev_path, p_ctx->ctx_hndl, NULL,&p_ctx->mc_hndl);
        if(rc) return rc;
        /* If registration with a NULL MMIO map "succeeded", prove the
         * handle is actually usable end-to-end (open/size/stat + R/W). */
        rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl);
        if(rc) return rc;
        rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl);
        if(rc) return rc;
        rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, 2, &actual_size);
        if(rc) return rc;
        rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat);
        if(rc) return rc;
        pid = getpid();
        stride = (1 << l_mc_stat.nmask);
        st_lba = (actual_size * (1 << l_mc_stat.nmask))-1;
        rc = send_write(p_ctx, st_lba ,stride, pid, VLBA);
        if(rc) return rc;
        rc = send_read(p_ctx, st_lba ,stride, VLBA);
        if(rc) return rc;
        rc = rw_cmp_buf(p_ctx, st_lba);
        rc = rc ? 1:0;
    }
    else if(2 == cmd) //NULL device path
    {
        rc = mc_register(NULL, p_ctx->ctx_hndl, (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl);
        rc = rc ? 2:0;
    }
    else if(3 == cmd) //with afu_path device
    {
        rc = mc_register(afu_path, p_ctx->ctx_hndl, (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl);
        rc = rc ? 3:0;
    }
    else if(4 == cmd) //with invalid device path
    {
        rc = mc_register("/dev/cxl/afu50.0m", p_ctx->ctx_hndl, (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl);
        rc = rc ? 4:0;
    }
    else if(5 == cmd) //with invalid ctx hndl(not assigned)
    {
        debug("actual ctx hndl :%d\n", p_ctx->ctx_hndl);
        p_ctx->ctx_hndl = p_ctx->ctx_hndl + 4;
        debug("invalid ctx hndl :%d\n", p_ctx->ctx_hndl);
        rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *) p_ctx->p_host_map,&p_ctx->mc_hndl);
        rc = rc ? 5:0;
    }
    else if(6 == cmd) //with invalid mmap adress
    {
        rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)map,&p_ctx->mc_hndl);
        rc = rc ? 6:0;
    }
    else if(7 == cmd) //twice mc_reg
    {
        pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
        rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl);
        CHECK_RC(rc, "mc_register");
        rc = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl);
        CHECK_RC(rc, "mc_open");
        rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, size, &actual_size);
        CHECK_RC(rc, "mc_size");
        rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat);
        //twice mc_register on same ctx
        rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map, &new_mc_hndl);
        //send write on 1st mc hndl
        rc1 = send_single_write(p_ctx, 0, pid);
        //do mc_size & open on old mc_reg
        rc2 = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl);
        rc3 = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, size, &actual_size);
        rc4 = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat);
        rc5 = mc_hdup(p_ctx->mc_hndl, &dup_mc_hndl);
        debug("mc_hdup rc is : %d\n", rc5);
        //now do mc_unreg on old one
        rc = mc_unregister(p_ctx->mc_hndl);
        CHECK_RC(rc, "mc_unregister");
        //do everything on new mc hndl
        p_ctx->mc_hndl = new_mc_hndl;
        rc = mc_open(p_ctx->mc_hndl, MC_RDWR, &p_ctx->res_hndl);
        CHECK_RC(rc, "mc_open");
        rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, size, &actual_size);
        CHECK_RC(rc, "mc_size");
        rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat);
        nlba = l_mc_stat.size * (1 << l_mc_stat.nmask);
        stride = 1 << l_mc_stat.nmask;
        for(st_lba = 0; st_lba < nlba; st_lba += (stride * NUM_CMDS))
        {
            rc = send_write(p_ctx, st_lba, stride, pid, VLBA);
            CHECK_RC(rc, "send_write");
        }
        /* All five ops on the stale (first) handle must have failed. */
        if(rc1 && rc2 && rc3 && rc4 && rc5)
        {
            rc = 7;
        }
        pthread_cancel(thread);
        mc_unregister(p_ctx->mc_hndl);
    }
    else if(8 == cmd) //mc_reg twice from 2 diff process
    {
        if(fork() == 0)
        {
            //mc_reg in child process as well
            pid = getpid();
            rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl);
            sleep(1);
            rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl);
            if(!rc)
            {
                fprintf(stderr, "%d : mc_open should fail rc = %d\n", pid, rc);
                exit(-1);
            }
            else
            {
                debug("%d : mc_open failed as expectd\n", pid);
            }
            rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, 2, &actual_size);
            if(!rc)
            {
                fprintf(stderr, "%d : mc_size should fail rc = %d\n", pid, rc);
                exit(-1);
            }
            else
            {
                debug("%d : mc_size failed as expectd\n", pid);
            }
            rc = rc ? 8:0;
            exit(rc);
        }
        else
        {
            sleep(1); //let child proc cal mc_reg 1str
            rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl);
            CHECK_RC(rc, "mc_register");
            pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
            rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl);
            CHECK_RC(rc, "mc_open");
            rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl, 2, &actual_size);
            CHECK_RC(rc, "mc_mc_size");
            rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat);
            CHECK_RC(rc, "mc_stat");
            st_lba = (actual_size * (1 << l_mc_stat.nmask))-1;
            rc += send_single_write(p_ctx, st_lba, pid);
            wait(&rc);   /* rc is reused as the child's wait status here */
            pthread_cancel(thread);
            if (WIFEXITED(rc))
            {
                rc = WEXITSTATUS(rc);
                rc = rc ? 8:0;
            }
            mc_unregister(p_ctx->mc_hndl);
        }
    }
    ctx_close(p_ctx);
    /* cmd 9 deliberately registers against the context AFTER it has
     * been closed above. */
    if(9 == cmd) //mc_reg with closed ctx
    {
        pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
        printf("calling mc_reg api after ctx close..\n");
        rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map, &p_ctx->mc_hndl);
        rc = rc ? 9:0;
    }
    mc_term();
    return rc;
}
void thread_clear(void *ctx) { ctx_close((Ctx *) ctx); }
//int create_res_hndl_afu_reset(char *dev, dev64_t devno, __u64 chunk)
/*
 * EEH/AFU-reset scenario: attach, create a vlun, optionally run I/O in
 * a worker thread, trigger EEH, then either recover the context
 * (do_recover=true: recover ioctl + verify + resume I/O) or return 100
 * to signal "held context, recovery intentionally skipped".  `last`
 * skips the EEH phase and goes straight to I/O + teardown.  A SysV
 * message queue (key) is used to signal a waiting process once
 * recovery succeeded.  Set NO_IO in the environment to skip all I/O.
 */
int create_res_hndl_afu_reset(bool do_recover, bool last)
{
    int rc;
    struct ctx my_ctx;
    struct ctx *p_ctx = &my_ctx;
    //int i;
    pthread_t thread;
    __u64 chunk = 0x1;
    __u64 stride= 0x1;
    int msgid;
    struct mymsgbuf msg_buf;
    pthread_t ioThreadId;
    do_io_thread_arg_t ioThreadData;
    do_io_thread_arg_t * p_ioThreadData=&ioThreadData;
    // we have to export "NO_IO" if we want to avoid IO
    char * noIOP = getenv("NO_IO");
    pid = getpid();
#ifdef _AIX
    memset(p_ctx,0,sizeof(my_ctx));
    strcpy(p_ctx->dev,cflash_path);
    if ((p_ctx->fd = open_dev(p_ctx->dev, O_RDWR)) < 0)
    {
        fprintf(stderr,"open failed %s, errno %d\n",p_ctx->dev, errno);
        return -1;
    }
    rc = ioctl_dk_capi_query_path(p_ctx);
    CHECK_RC(rc, "ioctl_dk_capi_query_path failed...\n");
    rc = ctx_init_internal(p_ctx, 0, p_ctx->devno);
#else
    rc = ctx_init(p_ctx);
#endif
    CHECK_RC(rc, "Context init failed");
    //thread to handle AFU interrupt & events
    if ( noIOP == NULL )
        pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    //create 0 vlun size & later call resize ioctl
    rc = create_resource(p_ctx, chunk * (p_ctx->chunk_size), 0, LUN_VIRTUAL);
    CHECK_RC(rc, "create LUN_VIRTUAL failed");
    //last new process send message to waiting process
    //that new ctx created now you can try to reattach
    msgid = msgget(key, IPC_CREAT | 0666);
    if (msgid < 0 )
    {
        fprintf(stderr, "%d: msgget() failed before msgsnd()\n", pid);
        return -1;
    }
    memset(&msg_buf, 0, sizeof(struct mymsgbuf));
    if (last)
    {
        /* Last process: skip the EEH phase entirely. */
        goto end;
    }
    if ( noIOP == NULL )
    {
        /* Kick off background I/O that keeps running while EEH hits. */
        p_ioThreadData->p_ctx=p_ctx;
        p_ioThreadData->stride=stride;
        p_ioThreadData->loopCount=0x100000; // Need this to go on 10 secs
        debug("%d: things look good, doing IO...\n",pid);
        rc =pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData);
        CHECK_RC(rc, "do_io_thread() pthread_create failed");
    }
#ifdef _AIX
    rc = do_eeh(p_ctx);
#else
    rc = do_poll_eeh(p_ctx);
#endif
    g_error=0; //reset any prev error might caught while EEH
    if ( noIOP == NULL )
    {
        pthread_join(ioThreadId, NULL);
    }
#ifndef _AIX //for linux
    if ( noIOP == NULL )
        pthread_cancel(thread);
#endif
    //We here after EEH done
    if (do_recover)
    {
        //do if recover true
        debug("%d: woow EEH is done recovering...\n",pid);
        rc = ioctl_dk_capi_recover_ctx(p_ctx);
        CHECK_RC(rc, "ctx reattached failed");
        /* Tell the waiting process the context is recovered. */
        msg_buf.mtype =2;
        strcpy(msg_buf.mtext, "K");
        if (msgsnd(msgid, &msg_buf, 2, IPC_NOWAIT) < 0)
        {
            fprintf(stderr, "%d: msgsnd failed\n", pid);
            return -1;
        }
#ifdef _AIX
        if (p_ctx->return_flags != DK_RF_REATTACHED)
            CHECK_RC(1, "recover ctx, expected DK_RF_REATTACHED");
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = DK_HINT_SENSE;
#endif
        fflush(stdout);
        ctx_reinit(p_ctx);
#ifdef _AIX
        p_ctx->hint=DK_HINT_SENSE;
#else
        p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE;
        // if dummy_sense_flag is set;
        // a dummy sense data will be copied into ioctl input
        p_ctx->dummy_sense_flag=1; // if dummy_sense_flag is set;
#endif
        rc = ioctl_dk_capi_verify(p_ctx);
        CHECK_RC(rc, "ioctl_dk_capi_verify failed");
#ifndef _AIX //for linux
        pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
#endif
    }
    else
    {
        //last one is
        /*msgid = msgget(key, IPC_CREAT | 0666);
        if(msgid < 0 ){ fprintf(stderr, "%d: msgget() failed before msgrcv()\n", pid); return -1; }
        debug("%d: Going to wait at msgrcv()..\n", pid); fflush(stdout);
        if(msgrcv(msgid, &msg_buf, 2, 1, 0) < 0) { fprintf(stderr, "%d: msgrcv failed with errno %d\n", pid, errno); return -1; }
        debug("%d: Got out of msgrcv()..EEH is done, Try to recover....\n",pid); */
        //as per today(9/28/2015) discussion with Sanket that
        //new attach will fail until holding context not exited
        //hope same apply for Linux as well
        return 100;
        /*rc = ioctl_dk_capi_recover_ctx(p_ctx);
        if(rc) return 100; //this to make sure recover failed
        else { fprintf(stderr,"%d:com'on recover should fail here...\n",pid); return 1; // we don't want to try IO anyway }*/
    }
end:
    if ( noIOP == NULL )
    {
        stride=0x1;
        rc = do_io(p_ctx, stride);
        CHECK_RC(rc, "IO failed after EEH/recover");
    }
    if ( noIOP == NULL )
        pthread_cancel(thread);
    sleep(1);
    fflush(stdout);
    sleep(5); // additional time to be safe !
    rc=close_res(p_ctx);
    sleep(5); // Don't let child exit to keep max ctx alive
    rc |= ctx_close(p_ctx);
    CHECK_RC(rc,"ctx close or close_res failed\n");
    return rc;
}
/*
 * Lose a context to EEH and deliberately do NOT recover it; then issue
 * further operations.  On AIX: wait on the message queue, then drive
 * create/resize/close ioctls on the dead context.  On Linux: attach a
 * brand-new context in its place and create a vlun on it.  Set NO_IO
 * in the environment to skip the I/O phase.
 */
int no_recover_and_ioctl()
{
    int rc;
    struct ctx my_ctx;
    struct ctx *p_ctx = &my_ctx;
    //__u64 flags;
    pthread_t thread;
    __u64 chunk = 0x1;
    __u64 stride= 0x1;
    pthread_t ioThreadId;
#ifdef _AIX //these are unused on Linux
    int msgid;
    struct mymsgbuf msg_buf;
#endif
    do_io_thread_arg_t ioThreadData;
    do_io_thread_arg_t * p_ioThreadData=&ioThreadData;
    char * noIOP = getenv("NO_IO");
    pid = getpid();
    printf("%d:no_recover_and_ioctl process created...\n",pid);
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    rc = create_resource(p_ctx, chunk *(p_ctx->chunk_size), DK_UVF_ALL_PATHS, LUN_VIRTUAL);
    CHECK_RC(rc, "create LUN_VIRTUAL failed");
    if ( noIOP == NULL )
    {
        //thread to handle AFU interrupt & events
        pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
        /* Background I/O that will be running when EEH strikes. */
        p_ioThreadData->p_ctx=p_ctx;
        p_ioThreadData->stride=stride;
        p_ioThreadData->loopCount=100;
        rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData);
        CHECK_RC(rc, "do_io_thread() pthread_create failed");
    }
#ifdef _AIX
    rc = do_eeh(p_ctx);
#else
    rc = do_poll_eeh(p_ctx);
#endif
    if ( noIOP == NULL )
    {
        pthread_join(ioThreadId, NULL);
    }
    if ( noIOP == NULL )
        pthread_cancel(thread);
#ifdef _AIX
    /* Wait until the recovering sibling signals via the msg queue. */
    msgid = msgget(key, IPC_CREAT | 0666);
    if (msgid < 0 )
    {
        fprintf(stderr, "%d: msgget() failed before msgsnd()\n", pid);
        return -1;
    }
    if (msgrcv(msgid, &msg_buf, 2, 2, 0) < 0)
    {
        fprintf(stderr, "%d: msgrcv failed with errno %d\n", pid, errno);
        return -1;
    }
    sleep(1);
    /* Operate on the unrecovered context; fold every rc together. */
    rc = create_resource(p_ctx, p_ctx->chunk_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
    rc |= vlun_resize(p_ctx, 2*p_ctx->chunk_size);
    rc |= close_res(p_ctx);
    rc |= ctx_close(p_ctx);
#else
    // For the lost context, we will create another new.
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    //thread to handle AFU interrupt & events
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    rc = create_resource(p_ctx, chunk *(p_ctx->chunk_size), DK_UVF_ALL_PATHS, LUN_VIRTUAL);
    pthread_cancel(thread);
#endif
    return rc;
}
/*
 * Negative tests for mc_xlate_lba(): per `cmd`, translate before
 * mc_size, with a NULL/garbage/foreign master handle, bad resource
 * handle, out-of-range VLBA, NULL output pointer, or after mc_close.
 * Returns the cmd number when the expected failure is observed, 0 when
 * the call unexpectedly succeeded, -1 on setup failure.
 */
int child_mc_xlate_error(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    int invalid=0;   /* stack garbage used as a bogus handle for cmd 7 */
    __u64 plba;
    __u64 size;
    mc_stat_t l_mc_stat;
    if(mc_init() !=0 )
    {
        fprintf(stderr, "mc_init failed.\n");
        return -1;
    }
    debug("mc_init success.\n");
    rc = ctx_init(p_ctx);
    if(rc != 0)
    {
        fprintf(stderr, "Context init failed, errno %d\n", errno);
        return -1;
    }
    rc = mc_register(master_dev_path, p_ctx->ctx_hndl, (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl);
    if(rc != 0)
    {
        fprintf(stderr, "mc_register: failed. ctx_hndl %d, rc %d\n",p_ctx->ctx_hndl, rc );
        return -1;
    }
    rc = mc_open(p_ctx->mc_hndl,MC_RDWR,&p_ctx->res_hndl);
    if(rc != 0)
    {
        fprintf(stderr, "ctx: %d:mc_open: failed,rc %d\n", p_ctx->ctx_hndl,rc);
        return -1;
    }
    rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat);
    CHECK_RC(rc, "mc_stat");
    if(1 == cmd) //without mc_size
    {
        rc = mc_xlate_lba(p_ctx->mc_hndl, p_ctx->res_hndl, 0,&plba);
        rc = rc ? 1:0;
    }
    else
    {
        /* All remaining cases run with one chunk actually sized. */
        rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,1,&size);
        if(2 == cmd) //MCH NULL
        {
            rc = mc_xlate_lba(NULL,p_ctx->res_hndl,0,&plba);
            debug("MCH NULL rc = %d\n",rc);
            rc = rc ? 2:0;
        }
        else if(3 == cmd) //invalid RCH
        {
            rc = mc_xlate_lba(p_ctx->mc_hndl,(p_ctx->res_hndl +4),0,&plba);
            rc = rc ? 3:0;
        }
        else if(4 == cmd) //invalid VLBA
        {
            rc = mc_xlate_lba(p_ctx->mc_hndl,p_ctx->res_hndl,((1 << l_mc_stat.nmask)+5),&plba);
            rc = rc ? 4:0;
        }
        else if(5 == cmd) //NULL to plba
        {
            rc = mc_xlate_lba(p_ctx->mc_hndl,p_ctx->res_hndl,0,NULL);
            rc = rc ? 5:0;
        }
        else if(6 == cmd) //diff MCH(no mc_open) & RCH with mc_size
        {
            /* Second context: translate res_hndl of ctx 1 through the
             * master handle of ctx 2 — must be rejected. */
            struct ctx tctx;
            struct ctx *p_tctx= &tctx;
            rc = ctx_init(p_tctx);
            rc = mc_register(master_dev_path, p_tctx->ctx_hndl, (volatile __u64 *)p_tctx->p_host_map,&p_tctx->mc_hndl);
            rc = mc_open(p_tctx->mc_hndl,MC_RDWR,&p_tctx->res_hndl);
            rc = mc_xlate_lba(p_tctx->mc_hndl,p_ctx->res_hndl,0,&plba);
            rc = rc ? 6:0;
            mc_close(p_tctx->mc_hndl,p_tctx->res_hndl);
            mc_unregister(p_tctx->mc_hndl);
            ctx_close(p_tctx);
        }
        else if(7 == cmd) //invaliud MCH
        {
            rc = mc_xlate_lba((mc_hndl_t)&invalid,p_ctx->res_hndl,0,&plba);
            rc = rc ? 7:0;
        }
    }
    mc_close(p_ctx->mc_hndl, p_ctx->res_hndl);
    if(8 == cmd) //after mc_close
    {
        rc = mc_xlate_lba(p_ctx->mc_hndl,p_ctx->res_hndl,0,&plba);
        rc = rc ? 8:0;
    }
    mc_unregister(p_ctx->mc_hndl);
    ctx_close(p_ctx);
    mc_term();
    return rc;
}
/*
 * Negative-path IOARCB tests over the mc_* master-context API: build
 * valid write commands on a vlun, then corrupt one field per `cmd`
 * (bad opcode, NULL/garbage EA, bad flags, bad handles, bad lengths)
 * and verify the AFU rejects them; cmd 4 additionally resends with an
 * invalid FC port and lun_id.
 *
 * NOTE(review): a second, larger mc_invalid_ioarcb() appears earlier in
 * this concatenated source — these two definitions cannot live in the
 * same translation unit; confirm they come from different test files.
 */
int mc_invalid_ioarcb(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunks=16;
    __u64 actual_size=0;
    __u64 vlba =0;
    __u32 *p_u32;
    __u64 stride;
    pthread_t thread;
    mc_stat_t l_mc_stat;
    int i;
    rc = mc_init();
    CHECK_RC(rc, "mc_init failed");
    debug("mc_init success :%d\n",rc);
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
    rc = mc_register(master_dev_path, p_ctx->ctx_hndl,
                     (volatile __u64 *)p_ctx->p_host_map,&p_ctx->mc_hndl);
    CHECK_RC(rc, "ctx reg failed");
    rc = mc_open(p_ctx->mc_hndl,MC_RDWR, &p_ctx->res_hndl);
    CHECK_RC(rc, "opening res_hndl");
    rc = mc_size(p_ctx->mc_hndl, p_ctx->res_hndl,chunks, &actual_size);
    CHECK_RC(rc, "mc_size");
    rc = mc_stat(p_ctx->mc_hndl, p_ctx->res_hndl, &l_mc_stat);
    CHECK_RC(rc, "mc_stat");
    stride = 1 << l_mc_stat.nmask;
    pid = getpid();
    /* Last valid LBA of the resource. */
    vlba = (actual_size * (1 << l_mc_stat.nmask))-1;
    fill_send_write(p_ctx, vlba, pid, stride, VLBA);
    /* Corrupt every queued command according to cmd. */
    for(i = 0; i < NUM_CMDS; i++)
    {
        if (1 == cmd)
        {
            //invalid opcode
            debug("invalid upcode(0xFA) action = %d\n",cmd);
            p_ctx->cmd[i].rcb.cdb[0] = 0xFA;
        }
        else if (2 == cmd)
        {
            //EA = NULL
            debug("EA = NULL action = %d\n",cmd);
            p_ctx->cmd[i].rcb.data_ea = (__u64)NULL;
        }
        else if(3 == cmd)
        {
            //invalid flags
            p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL;
            p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ;
            debug("invalid flag = 0X%X\n",p_ctx->cmd[i].rcb.req_flags);
        }
        else if(5 == cmd)
        {
            //SISL_AFU_RC_RHT_INVALID
            p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl + 2;
        }
        else if( 6 == cmd)
        {
            //SISL_AFU_RC_RHT_OUT_OF_BOUNDS
            p_ctx->cmd[i].rcb.res_hndl = MAX_RES_HANDLE;
        }
        else if(7 == cmd)
        {
            //invalid address for page fault
            debug("setting EA = 0x1234 to generate error page fault\n");
            p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234;
        }
        else if(8 == cmd)
        {
            //invalid ctx_id
            debug("%d : sending invalid ctx id\n", pid);
            p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl +10;
        }
        else if(9 == cmd)
        {
            //test flag underrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0])/2;
        }
        else if(10 == cmd)
        {
            // test flag overrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]) +2;
        }
        else if(11 == cmd)
        {
            //rc scsi_rc_check
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, LBA_BLK +1);
        }
        else if(12 == cmd)
        {
            //data len 0 in ioarcb
            p_ctx->cmd[i].rcb.data_len = 0;
        }
        else if(13 == cmd)
        {
            //NUM BLK to write 0
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, 0);
        }
    }
    //send_single_cmd(p_ctx);
    send_cmd(p_ctx);
    //rc = wait_single_resp(p_ctx);
    rc = wait_resp(p_ctx);
    /* Underrun/overrun/len cases must surface non-zero rc flags. */
    if( cmd >= 9 && cmd <= 13)
    {
        if(!rc_flags)
        {
            if(!dont_displa_err_msg)
                fprintf(stderr, "%d : Expecting rc flags non zero\n", pid);
            rc = -1;
        }
    }
    if(4 == cmd)
    {
        //invalid fc port & lun id
        debug("invalid fc port(0xFF)&lun id(0X1200), action=%d",cmd);
        fill_send_write(p_ctx, vlba, pid, stride, PLBA);
        for(i = 0; i < NUM_CMDS; i++)
        {
            p_ctx->cmd[i].rcb.lun_id = 0x12000;
            p_ctx->cmd[i].rcb.port_sel = 0xff;
        }
        //send_single_cmd(p_ctx);
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
    }
    pthread_cancel(thread);
    mc_close(p_ctx->mc_hndl,p_ctx->res_hndl);
    mc_unregister(p_ctx->mc_hndl);
    ctx_close(p_ctx);
    mc_term();
    return rc;
}
/*
 * test_dcqexp_ioctl() - DK_CAPI_QUERY_EXCEPTIONS scenarios (AIX only; a
 * non-AIX build compiles to a no-op returning 0).
 *
 * A do_poll_for_event() thread waits for the exception to be raised and
 * signals excpPrt->cv / exceptionDoneFlag; each case then queries the
 * exception mask and compares it against the expected combination.
 * Several cases are semi-manual and require operator action on the
 * storage box ("texan") while the test runs.
 *
 * Returns 0 on pass, non-zero (255 for a bad exception mask) on failure.
 *
 * Fixes vs. previous revision:
 *  - errorMsg memset wrote MSG_LENGTH+1 bytes into a MSG_LENGTH array.
 *  - excpPrt was memset AFTER its mutex/cv were initialised, wiping them;
 *    the zeroing now happens first.
 *  - the backup context pointer aliased u_ctx itself, making every
 *    save/restore copy a no-op; it now has its own storage.
 *  - the first two cases never unlocked excpPrt->mutex after the wait loop.
 *  - EEH case: '!=' binds tighter than '|', so the adap_except_type check
 *    was always true; the mask is now parenthesised.
 */
int test_dcqexp_ioctl(int cnum)
{
    DEBUG_MORE("inside test_dcqexp_ioctl");
    int rc=0;
#ifdef _AIX
    __u64 stride=0x10;
    struct ctx u_ctx;
    struct exceptionPacket excpVar;
    struct exceptionPacket * excpPrt =&excpVar;
    pthread_t thread_intr;
    uint64_t verify_exception;
    struct ctx *p_ctx = &u_ctx;
    /* BUG FIX: was "struct ctx *p_ctx_backup = &u_ctx;" which aliased the
     * live context, so *p_ctx_backup = *p_ctx saved nothing */
    struct ctx u_ctx_backup;
    struct ctx *p_ctx_backup = &u_ctx_backup;
    char errorMsg[MSG_LENGTH];
    pthread_t thread;
    pthread_mutexattr_t mattrVar;
    pthread_condattr_t cattrVar;
    __u64 chunk =0;
    __u64 nlba =0;

    /* BUG FIX: zero everything BEFORE pthread_mutex_init/pthread_cond_init;
     * the old order memset() the packet afterwards and destroyed the
     * freshly-initialised mutex and condition variable */
    memset(p_ctx, 0, sizeof(struct ctx));
    memset(excpPrt, 0, sizeof(struct exceptionPacket));
    /* BUG FIX: was memset(errorMsg, 0, MSG_LENGTH+1) - one byte past end */
    memset(errorMsg, 0, sizeof(errorMsg));
    pthread_mutexattr_init(&mattrVar);
    pthread_condattr_init(&cattrVar);
    pthread_mutex_init(&excpPrt->mutex , &mattrVar);
    pthread_cond_init(&excpPrt->cv , &cattrVar);

    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    pthread_create(&thread_intr, NULL, ctx_rrq_rx, p_ctx);
    excpPrt->excpCtx = p_ctx ;
    /* Started do_poll_for_event thread until desired exception generated */
    pthread_create(&thread,NULL,do_poll_for_event, excpPrt);
    sleep(5); // its rare but still avoiding race condition
    switch (cnum)
    {
    case EXCP_VLUN_DISABLE: // 7.1.230
        nlba = p_ctx->last_phys_lba + 1;
        rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "Create resource failed \n");
        debug(" ----------- Please unmap disk from the host now -------\n");
        *p_ctx_backup=*p_ctx;
        debug(" ------ Let the I/O start and then do UA stuff at texan--------\n");
        do
        {
            rc = do_io(p_ctx, stride);
            if (rc !=0)
            {
                debug("rc=%d,IO failed..... bye from loop\n",rc);
                break;
            }
            else
            {
                debug("rc=%d,IO succeeded \n",rc);
            }
            /* restore fields do_io() may have clobbered */
            *p_ctx=*p_ctx_backup;
        } while ( rc ==0);
        g_error=0;
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = DK_HINT_SENSE;
        rc = ioctl_dk_capi_verify(p_ctx);
        CHECK_RC(rc, "dk_capi_verify FAILED\n");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        /* BUG FIX: reset the flag and release the mutex (was held forever) */
        exceptionDoneFlag=0;
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        verify_exception=DK_CE_PATH_LOST|DK_CE_VERIFY_IN_PROGRESS;
        if ( p_ctx->exceptions != verify_exception )
        {
            rc=255; /* Non zero rc value */
            debug("%d: expected : 0x%llx and recieved : 0x%llx\n",
                  pid, verify_exception, p_ctx->exceptions);
            strcpy(errorMsg, "Fail:EXCP_VLUN_DISABLE:bad excp");
            goto xerror;
        }
        break ;
    case EXCP_PLUN_DISABLE: // 7.1.230
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "create LUN_DIRECT failed");
        debug(" ----------- Please unmap disk from the host now -------\n");
        *p_ctx_backup=*p_ctx;
        debug(" ------ Let the I/O start and then do UA stuff at texan--------\n");
        do
        {
            rc = do_io(p_ctx, stride);
            if (rc !=0)
            {
                debug("rc=%d,IO failed..... bye from loop\n",rc);
                break;
            }
            else
            {
                debug("rc=%d,IO succeeded \n",rc);
            }
            *p_ctx=*p_ctx_backup;
        } while ( rc ==0);
        g_error=0;
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = DK_HINT_SENSE;
        rc = ioctl_dk_capi_verify(p_ctx);
        CHECK_RC(rc, "dk_capi_verify FAILED\n");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        /* BUG FIX: reset the flag and release the mutex (was held forever) */
        exceptionDoneFlag=0;
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        verify_exception=DK_CE_PATH_LOST|DK_CE_VERIFY_IN_PROGRESS;
        if ( p_ctx->exceptions != verify_exception )
        {
            rc=255; /* Non zero rc value */
            strcpy(errorMsg, "Fail:EXCP_VLUN_DISABLE:bad excp");
            goto xerror;
        }
        break ;
    case EXCP_VLUN_VERIFY: // 7.1.232 //7.1.225
        chunk = 0x10;
        rc = create_resource(p_ctx, 0, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "Create resource failed \n");
        nlba = chunk * (p_ctx->chunk_size);
        rc = vlun_resize(p_ctx, nlba); //TBD input need to check once
        // Heading for verification using ioctl
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = DK_HINT_SENSE;
        //strcpy(p_ctx->sense_data,"TBD");
        rc = ioctl_dk_capi_verify(p_ctx);
        CHECK_RC(rc, "failed : ioctl_dk_capi_verify()");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        // reset the flag
        exceptionDoneFlag=0;
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        verify_exception=DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED;
        if ( p_ctx->exceptions != verify_exception )
        {
            rc=255; /* Non zero rc value */
            strcpy(errorMsg, "Fail:EXCP_VLUN_VERIFY:bad excp");
            goto xerror;
        }
        break;
    case EXCP_PLUN_VERIFY : // 7.1.232 // 7.1.225
        rc=ioctl_dk_capi_udirect(p_ctx);
        CHECK_RC(rc, "PLUN resource failed \n");
        //TBD input need to check once
        // Heading for verification using ioctl
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = DK_HINT_SENSE;
        rc = ioctl_dk_capi_verify(p_ctx);
        CHECK_RC(rc, "failed : ioctl_dk_capi_verify()");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        // reset the flag
        exceptionDoneFlag=0;
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        verify_exception=DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED;
        if ( p_ctx->exceptions != verify_exception )
        {
            rc=255; /* Non zero rc value */
            strcpy(errorMsg, "Fail:EXCP_VLUN_VERIFY:bad excp");
            goto xerror;
        }
        break ;
    case EXCP_VLUN_INCREASE : //7.1.231
        rc = create_resource(p_ctx, 0, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "Create resource failed \n");
        // Just increasing by 10 chunk
        nlba = 10 * (p_ctx->chunk_size);
        rc = vlun_resize(p_ctx, nlba);
        CHECK_RC(rc, "vlun_resize failed\n");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        if ( p_ctx->exceptions != DK_CE_SIZE_CHANGE )
        {
            rc=255; /* Non zero rc value */
            strcpy(errorMsg, "Fail:EXCP_PLUN_VERIFY:bad excp");
            goto xerror;
        }
        break;
    case EXCP_VLUN_REDUCE : //7.1.233
        // taking all the vlun
        nlba = p_ctx->last_phys_lba + 1;
        rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "Create resource failed \n");
        debug("---------- Waiting at poll().. Please decrease Disk size in texan box -----\n");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        if ( p_ctx->exceptions != DK_CE_VLUN_TRUNCATED)
        {
            rc=255; /* Non zero rc value */
            strcpy(errorMsg, "Fail:EXCP_PLUN_VERIFY:bad excp");
            goto xerror;
        }
        break;
    case EXCP_VLUN_UATTENTION : // going to manual 7.1.234
        nlba = p_ctx->last_phys_lba + 1;
        rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "Create resource failed \n");
        /* was a pointer self-assignment (no-op); now a real snapshot */
        *p_ctx_backup=*p_ctx;
        debug(" ------ Let the I/O start and then do UA stuff at texan--------\n");
        do
        {
            rc = do_io(p_ctx, stride);
            if (rc !=0)
            {
                debug("rc=%d,IO failed..... bye from loop\n",rc);
                break;
            }
            else
            {
                debug("rc=%d,IO succeeded \n",rc);
            }
            *p_ctx=*p_ctx_backup;
        } while ( rc ==0);
        g_error=0;
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = DK_HINT_SENSE;
        rc = ioctl_dk_capi_verify(p_ctx);
        debug("rc = %d , g_error =%d\n",rc,g_error);
        CHECK_RC(rc, "dk_capi_verify FAILED\n");
        debug(" -------- I am waiting at poll() for POLLPRI ---------- \n");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        if ( p_ctx->exceptions != (DK_CE_UA_RECEIVED|DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED|DK_CE_SIZE_CHANGE) )
        {
            rc=255; /* Non zero rc value */
            strcpy(errorMsg, "Fail:EXCP_VLUN_ATTENTION:bad excp");
            goto xerror;
        }
        break;
    case EXCP_PLUN_UATTENTION :
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "create LUN_DIRECT failed");
        /* was a pointer self-assignment (no-op); now a real snapshot */
        *p_ctx_backup=*p_ctx;
        debug(" ------ Let the I/O start and then do UA stuff at texan--------\n");
        do
        {
            rc = do_io(p_ctx, stride);
            if (rc !=0)
            {
                debug("rc=%d,IO failed..... bye from loop\n",rc);
                break;
            }
            else
            {
                debug("rc=%d,IO succeeded \n",rc);
            }
            *p_ctx=*p_ctx_backup;
        } while ( rc ==0);
        g_error=0;
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = DK_HINT_SENSE;
        rc = ioctl_dk_capi_verify(p_ctx);
        debug("rc = %d , g_error =%d\n",rc,g_error);
        CHECK_RC(rc, "dk_capi_verify FAILED\n");
        debug(" -------- I am waiting at poll() for POLLPRI ---------- \n");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        if ( p_ctx->exceptions != (DK_CE_UA_RECEIVED|DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED|DK_CE_SIZE_CHANGE) )
        {
            rc=255; /* Non zero rc value */
            strcpy(errorMsg, "Fail:EXCP_PLUN_UATTENTION:bad excp");
            goto xerror;
        }
        break;
    case EXCP_EEH_SIMULATION : // 7.1.229
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "Create resource failed \n");
        rc = do_eeh(p_ctx);
        CHECK_RC(rc, "do_eeh() failed");
        p_ctx->flags = DK_VF_HC_TUR;
        rc = ioctl_dk_capi_verify(p_ctx);
        CHECK_RC(rc, "failed : ioctl_dk_capi_verify()");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ADAPTER;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        verify_exception=DK_CE_ADAPTER_EXCEPTION|DK_CE_VERIFY_IN_PROGRESS | DK_CE_VERIFY_SUCCEEDED ;
        if ( p_ctx->exceptions != verify_exception )
        {
            rc=255; /* Non zero rc value */
            strcpy(errorMsg, "Fail:EXCP_EEH_SIMULATION:bad excp");
            goto xerror;
        }
        // EEH code is still not tested
        if ( p_ctx->adap_except_count != 0 )
        {
            rc=255; // Non zero rc value
            strcpy(errorMsg, "Fail:EXCP_EEH_SIMULATION:bad excp");
            goto xerror;
        }
        /* BUG FIX: parenthesised the mask; "x != A|B|C" parsed as
         * "(x != A)|B|C" and was always true */
        if ( p_ctx->adap_except_type != (DK_AET_EEH_EVENT|DK_AET_BAD_PF|DK_AET_AFU_ERROR) )
        {
            rc=255;
            strcpy(errorMsg, "Fail:EXCP_EEH_SIMULATION:bad excp");
            goto xerror;
        }
        break;
    case EXCP_DISK_INCREASE : //7.1.226
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "create LUN_DIRECT failed");
        debug("---------- Please increase Disk size in texan box -----\n");
        debug("---------- You have 15 secs to do that -----\n");
        sleep(15);
        debug("---------- Sleep over. Moving on... -----\n");
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = 0;
        rc = ioctl_dk_capi_verify(p_ctx);
        CHECK_RC(rc, "dk_capi_verify FAILED\n");
        pthread_mutex_lock( &excpPrt->mutex );
        while ( exceptionDoneFlag!=1)
        {
            pthread_cond_wait(&excpPrt->cv,&excpPrt->mutex);
        }
        pthread_mutex_unlock(&excpPrt->mutex);
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        rc = ioctl_dk_capi_query_exception(p_ctx);
        CHECK_RC(rc, "dk_capi_query FAILED\n");
        verify_exception=DK_CE_VERIFY_IN_PROGRESS|DK_CE_VERIFY_SUCCEEDED|DK_CE_SIZE_CHANGE;
        if ( p_ctx->exceptions != verify_exception )
        {
            rc=255; /* Non zero rc value */
            debug("%d: expected : 0x%llx and recieved : 0x%llx\n",
                  pid, verify_exception, p_ctx->exceptions);
            strcpy(errorMsg, "Fail:EXCP_DISK_INCREASE:bad excp");
            goto xerror;
        }
        break;
    default:
        rc = -1;
        break;
    }
xerror:
    pthread_mutexattr_destroy(&mattrVar);
    pthread_condattr_destroy(&cattrVar);
    pthread_cancel(thread);
    pthread_cancel(thread_intr);
    close_res(p_ctx);
    ctx_close(p_ctx);
    CHECK_RC(rc, errorMsg);
#endif
    return rc;
}
int test_spio_vlun(int cmd) { int rc; struct ctx myctx; struct ctx *p_ctx = &myctx; pthread_t thread; __u64 chunk = 0x10; __u64 nlba; __u64 stride=0x10000; pid = getpid(); rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); //thread to handle AFU interrupt & events pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); if (3 == cmd) { //IO ON NO RES expect AFURC p_ctx->last_lba = chunk * p_ctx->chunk_size -1; rc = do_io(p_ctx, stride); pthread_cancel(thread); ctx_close(p_ctx); return rc; } //create 0 vlun size & later call resize ioctl if (1 == cmd) { //0 size debug("%d: create VLUN with 0 size\n", pid); rc = create_resource(p_ctx, 0, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL); CHECK_RC(rc, "create LUN_VIRTUAL failed"); #ifdef _AIX rc = compare_size(p_ctx->last_lba, 0); #else rc = compare_size(p_ctx->last_lba, -1); #endif CHECK_RC(rc, "failed compare_size"); p_ctx->last_lba=0xFFFF; rc = do_io(p_ctx,stride); if (rc != 0x13 ) { CHECK_RC(1,"IO should fail with afu_rc=0x13\n"); } else { fprintf(stderr, "IO failed as expected, don't worry....\n"); g_error=0; rc=0; } } else { nlba = 1 * (p_ctx->chunk_size); rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); CHECK_RC(rc, "create LUN_VIRTUAL failed"); rc = compare_size(p_ctx->last_lba, nlba-1); CHECK_RC(rc, "failed compare_size"); } nlba = chunk * (p_ctx->chunk_size); rc = vlun_resize(p_ctx, nlba); CHECK_RC(rc, "vlun_resize failed"); rc = compare_size(p_ctx->last_lba, nlba-1); CHECK_RC(rc, "failed compare_size"); //i would like to write/read all lbas //stride = p_ctx->blk_len; rc |= do_io(p_ctx, stride); rc |= vlun_resize(p_ctx, 0); rc |= vlun_resize(p_ctx, nlba); rc |= do_io(p_ctx, stride); pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); rc |= g_error; return rc; }
/*
 * test_dcqexp_invalid() - DK_CAPI_QUERY_EXCEPTIONS with invalid arguments
 * (AIX only; non-AIX builds compile to a no-op returning 0).
 *
 * Each sub-case fills the ioctl structure with mostly-valid data but one
 * bad field (devno, ctx token, or resource handle) and expects the ioctl
 * to FAIL; a successful ioctl marks the test failed (imFailed=1).
 *
 * Returns 0 on pass, non-zero on failure.
 *
 * Fix vs. previous revision: errorMsg memset wrote MSG_LENGTH+1 bytes
 * into a MSG_LENGTH array (one-byte stack overflow).
 */
int test_dcqexp_invalid(int cnum)
{
    int imFailed=0;
#ifdef _AIX
    int rc=0;
    struct ctx u_ctx;
    struct ctx *p_ctx = &u_ctx;
    char errorMsg[MSG_LENGTH];
    struct dk_capi_exceptions exceptions;
    __u64 chunk = 10;
    __u64 nlba = chunk * NUM_BLOCKS;

    memset(p_ctx, 0, sizeof(struct ctx));
    /* BUG FIX: was memset(errorMsg, 0, MSG_LENGTH+1) - one byte past end */
    memset(errorMsg, 0, sizeof(errorMsg));
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    switch (cnum)
    {
    case EXCP_INVAL_DEVNO :
        rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "Create resource failed \n");
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        exceptions.version = p_ctx->version;
        exceptions.ctx_token = p_ctx->ctx_hndl;
        exceptions.rsrc_handle = p_ctx->res_hndl;
        exceptions.flags =p_ctx->flags;
        exceptions.devno=0x0000FFFF ; // invalid dev no
        rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_EXCEPTIONS, &exceptions);
        if (rc == 0 )
        {
            /* ioctl accepted a bogus devno - test failed */
            imFailed = 1 ;
            strcpy(errorMsg, "Fail:EXCP_INVAL_DEVNO ");
            goto xerror ;
        }
        break;
    case EXCP_INVAL_CTXTKN :
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "Create resource failed \n");
        p_ctx->flags=DK_QEF_ALL_RESOURCE;
        exceptions.version = p_ctx->version;
        exceptions.ctx_token = 0x0000FFFF; // invalid context no
        exceptions.rsrc_handle = p_ctx->res_hndl;
        exceptions.flags =p_ctx->flags;
        exceptions.devno=p_ctx->devno ;
        rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_EXCEPTIONS, &exceptions);
        if (rc == 0 )
        {
            imFailed = 1 ;
            strcpy(errorMsg, "Fail:EXCP_INVAL_CTXTKN ");
            goto xerror ;
        }
        break;
    case EXCP_INVAL_RSCHNDL :
        rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "Create resource failed \n");
        /* NOTE(review): unlike the other cases this passes the handle that
         * create_resource just returned (still valid) and leaves
         * p_ctx->flags at 0 - confirm a stale/invalid handle was intended */
        exceptions.version = p_ctx->version;
        exceptions.ctx_token = p_ctx->ctx_hndl;
        exceptions.rsrc_handle = p_ctx->res_hndl; // this no more valid
        exceptions.flags =p_ctx->flags;
        exceptions.devno=p_ctx->devno ;
        rc = ioctl(p_ctx->fd, DK_CAPI_QUERY_EXCEPTIONS, &exceptions);
        if (rc == 0 )
        {
            imFailed = 1 ;
            strcpy(errorMsg, "Fail:EXCP_INVAL_CTXTKN ");
            goto xerror ;
        }
        break ;
    default:
        rc = -1;
        break;
    }
xerror :
    close_res(p_ctx);
    ctx_close(p_ctx);
    CHECK_RC(imFailed , errorMsg);
#endif
    return imFailed;
}
int test_fc_port_reset_vlun() { int rc; struct ctx myctx; struct ctx *p_ctx = &myctx; pthread_t thread; int ioCounter=0; __u64 nlba; __u64 stride=0x1; pid = getpid(); #ifdef _AIX memset(p_ctx, 0, sizeof(myctx)); strcpy(p_ctx->dev, cflash_path); if ((p_ctx->fd =open_dev(p_ctx->dev, O_RDWR)) < 0) { fprintf(stderr,"open %s failed, errno=%d\n",p_ctx->dev,errno); return -1; } rc = ioctl_dk_capi_query_path(p_ctx); CHECK_RC(rc,"dk_capi_query_path failed..\n"); rc = ctx_init_internal(p_ctx, 0,p_ctx->devno); #else rc = ctx_init(p_ctx); #endif CHECK_RC(rc, "Context init failed"); //thread to handle AFU interrupt & events pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); nlba = 1 * (p_ctx->chunk_size); rc = create_resource(p_ctx, nlba, 0, LUN_VIRTUAL); CHECK_RC(rc, "create LUN_VIRTUAL failed"); rc = compare_size(p_ctx->last_lba, nlba-1); CHECK_RC(rc, "failed compare_size"); debug("-- Going to start IO.Please do chportfc -reset <pnum> at texan --\n"); debug("rc=%d,g_error=%d\n",rc,g_error); do { rc = do_io(p_ctx, stride); if (rc !=0 ) { debug("rc=%d,ioCounter=%d,IO failed..... \n",rc,ioCounter); if ( ioCounter==1 ) { debug("rc=%d, Going to verify.... \n",rc); p_ctx->flags=DK_VF_LUN_RESET; #ifdef _AIX p_ctx->hint = DK_HINT_SENSE; #else p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; #endif rc = ioctl_dk_capi_verify(p_ctx); CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); } else { if (ioCounter > 1) { rc=-1; // IO failed third time break; } } } else { debug("rc=%d,IO succeeded \n",rc); g_error=0; } ioCounter++; rc|=g_error; sleep(3); } while ( rc !=0); debug("rc=%d,g_error=%d\n",rc,g_error); if ( ioCounter <= 1) { debug("WARNING: Test case not excuted properly... Please rerun\n"); rc =255; } pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); rc |= g_error; return rc; }
/*
 * test_clone_ioctl() - DK_CAPI_CLONE coverage.
 *
 * Parent: creates MAX_RES_HANDLE vluns of increasing size, closes a
 * scattered subset (cl_index) so the resource table has holes, and writes
 * a pid-stamped pattern to every surviving vlun.  It then forks:
 *   - child: detaches from the parent's MMIO map, attaches a fresh
 *     context, clones the parent's context, re-reads and verifies the
 *     parent's data, then re-creates the closed handles and runs I/O.
 *   - parent: concurrently re-creates the same closed handles in its own
 *     context, randomly resizes every resource with I/O, then closes all
 *     resources and reaps the child via wait4all().
 *
 * NOTE(review): the 'cmd' parameter is currently unused — confirm whether
 * variants were planned or the parameter can be dropped at the call sites.
 *
 * Returns 0 on success (parent side ORs in the child's exit status).
 */
int test_clone_ioctl(int cmd)
{
    struct ctx myctx;
    int i;
    pid_t cpid;
    struct ctx *p_ctx=&myctx;
    uint64_t nlba;
    uint64_t st_lba;
    uint64_t stride=0x1000;
    int rc=0;
    uint64_t src_ctx_id;
    uint64_t src_adap_fd;
    pthread_t thread;
    uint64_t resource[MAX_RES_HANDLE]; // resource handle per slot
    uint64_t RES_CLOSED=-1;            // sentinel marking a closed slot
    int cl_index[5]={ 1,7,10,12,15 };  // slots to close before the clone

    pid = getpid();
    rc =ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    p_ctx->flags = DK_UVF_ALL_PATHS;
    /* create MAX_RES_HANDLE vluns of increasing size */
    for (i=0;i<MAX_RES_HANDLE;i++)
    {
        p_ctx->lun_size = (i+1)*p_ctx->chunk_size;
        rc = create_res(p_ctx);
        CHECK_RC(rc, "create res failed");
        resource[i]=p_ctx->rsrc_handle;
    }
    /* punch holes in the resource table so the clone sees gaps */
    for (i=0;i<5;i++)
    {
        p_ctx->rsrc_handle= resource[cl_index[i]];
        close_res(p_ctx);
        resource[cl_index[i]]= RES_CLOSED;
    }
    /* stamp every surviving vlun with a pid-keyed write pattern */
    for (i=0; i<MAX_RES_HANDLE;i++)
    {
        if (RES_CLOSED == resource[i]) continue;
        nlba = (i+1)*p_ctx->chunk_size;
        p_ctx->rsrc_handle = resource[i];
        p_ctx->res_hndl = p_ctx->rsrc_handle & RES_HNDLR_MASK;
        for (st_lba=0;st_lba<nlba;st_lba += (NUM_CMDS*stride))
        {
            rc = send_write(p_ctx,st_lba,stride,pid);
            CHECK_RC(rc, "send write failed\n");
        }
    }
    //write done cancel thread now
    pthread_cancel(thread);
    cpid = fork();
    if (cpid == 0)
    {
        //child process
        pid = getpid();
        ppid = getppid();
        //take backup parent ctx_id
        src_ctx_id= p_ctx->context_id;
        src_adap_fd = p_ctx->adap_fd;
        //do unmap parent mmio 1st
        rc =munmap((void *)p_ctx->p_host_map, p_ctx->mmio_size);
        CHECK_RC_EXIT(rc, "munmap failed\n");
        //do fresh attach for child
        rc = ctx_init_internal(p_ctx,DK_AF_ASSIGN_AFU,p_ctx->devno);
        CHECK_RC_EXIT(rc, "ctx_init_internal failed");
        pthread_create(&thread, NULL,ctx_rrq_rx,p_ctx);
        //do clone
        rc = ioctl_dk_capi_clone(p_ctx, src_ctx_id,src_adap_fd);
        CHECK_RC_EXIT(rc, "clone ioctl failed");
        /* read back through the cloned handles and compare against the
         * parent's write pattern */
        for (i=0; i< MAX_RES_HANDLE;i++)
        {
            if (RES_CLOSED == resource[i]) continue;
            p_ctx->rsrc_handle = resource[i];
            p_ctx->res_hndl = p_ctx->rsrc_handle & RES_HNDLR_MASK;
            nlba = (i+1)*p_ctx->chunk_size;
            for (st_lba=0;st_lba<nlba; st_lba+=(NUM_CMDS*stride))
            {
                rc = send_read(p_ctx,st_lba,stride);
                CHECK_RC_EXIT(rc,"send_read failed\n");
                rc = rw_cmp_buf_cloned(p_ctx, st_lba);
                CHECK_RC_EXIT(rc,"rw_cmp_buf_cloned failed\n");
            }
        }
        sleep(1);
        //now create closed resources
        p_ctx->flags = DK_UVF_ALL_PATHS;
        for (i=0; i < 5;i++)
        {
            p_ctx->lun_size = (cl_index[i]+1)*p_ctx->chunk_size;
            rc = create_res(p_ctx);
            CHECK_RC_EXIT(rc,"res_create failed\n");
            resource[cl_index[i]] = p_ctx->rsrc_handle;
        }
        //do io on new resources
        p_ctx->st_lba = 0;
        for (i=0;i<5;i++)
        {
            p_ctx->last_lba = ((cl_index[i]+1)*p_ctx->chunk_size) -1;
            p_ctx->res_hndl = resource[cl_index[i]] & RES_HNDLR_MASK;
            rc = do_io(p_ctx, stride);
            CHECK_RC_EXIT(rc, "do_io failed\n");
        }
        pthread_cancel(thread);
        ctx_close(p_ctx);
        exit(0);
    } //child process end
    else
    {
        //create pthread
        sleep(1); //let child process do clone & read written data
        pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
        //do open closed res
        //now create closed resources
        p_ctx->flags = DK_UVF_ALL_PATHS;
        for (i=0; i < 5;i++)
        {
            p_ctx->lun_size = (cl_index[i]+1)*p_ctx->chunk_size;
            rc = create_res(p_ctx);
            CHECK_RC_EXIT(rc,"res_create failed\n");
            resource[cl_index[i]] = p_ctx->rsrc_handle;
        }
        //do resize all resources & IO
        for (i=0;i<MAX_RES_HANDLE;i++)
        {
            /* random new size in [1, MAX_RES_HANDLE] chunks */
            p_ctx->req_size = (rand()%MAX_RES_HANDLE +1) * p_ctx->chunk_size;
            p_ctx->rsrc_handle = resource[i];
            p_ctx->res_hndl = p_ctx->rsrc_handle & RES_HNDLR_MASK;
            rc = ioctl_dk_capi_vlun_resize(p_ctx);
            CHECK_RC(rc, "dk_capi_resize_ioctl failed\n");
            rc = do_io(p_ctx, stride);
            CHECK_RC(rc, "do_io failed\n");
        }
        //close res
        for (i=0;i<MAX_RES_HANDLE;i++)
        {
            p_ctx->rsrc_handle = resource[i];
            rc = close_res(p_ctx);
            CHECK_RC(rc, "cose_res failed\n");
        }
        pthread_cancel(thread);
        ctx_close(p_ctx);
        /* collect the child's exit status */
        rc = wait4all();
    }
    return rc;
}
int test_ctx_reset() { int rc; struct ctx myctx; struct ctx *p_ctx= &myctx; pthread_t thread; __u64 buf_size = 0x2000000; //32MB __u64 chunk = 10; __u64 stride = 0x1000; struct rwlargebuf rwbuf; int i; pid=getpid(); rc = ctx_init(p_ctx); CHECK_RC(rc, "ctx_init failed"); pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); rc = create_resource(p_ctx,chunk*p_ctx->chunk_size,DK_UVF_ASSIGN_PATH,LUN_VIRTUAL); CHECK_RC(rc, "create LUN_VIRTUAL failed"); //do bad EA if (1) { debug("%d: ........place bad EA....\n", pid); fill_send_write(p_ctx, 0, pid, stride); for (i = 0; i < NUM_CMDS; i++) { p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234; } bad_address = true; send_cmd(p_ctx); rc = wait_resp(p_ctx); sleep(1); //normal IO bad_address = false; debug("%d: .........after bad EA, do normal IO....\n", pid); rc = do_io(p_ctx, stride); CHECK_RC(rc,"Normal IO failed after bad EA"); //do bad RCB debug("%d: .........place bad RCB....\n", pid); bad_address = true; place_bad_addresses(p_ctx, 1); sleep(2); //normal IO debug("%d: ......after bad RCB, do normal IO....\n", pid); bad_address = false; rc = do_io(p_ctx, stride); CHECK_RC(rc,"Normal IO failed after bad RCB"); #ifdef _AIX rc = setRUnlimited(); CHECK_RC(rc,"setRUnlimited() failed"); #endif } //do large _transfer debug("%d: Do large transfer ....\n", pid); rc = allocate_buf(&rwbuf, buf_size); CHECK_RC(rc, "memory allocation failed"); rc = do_large_io(p_ctx, &rwbuf, buf_size); deallocate_buf(&rwbuf); buf_size = 0x100000; //4k rc = allocate_buf(&rwbuf, buf_size); CHECK_RC(rc, "memory allocation failed"); //normal io debug("%d: after large transfer,do normal IO ....\n", pid); rc = do_io(p_ctx, 0x10000); //rc = do_large_io(p_ctx, &rwbuf, buf_size); CHECK_RC(rc,"Normal IO failed after large transfer"); pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
int test_spio_lun(char *dev, dev64_t devno, __u16 lun_type, __u64 chunk) { int rc; struct ctx myctx; struct ctx *p_ctx = &myctx; pthread_t thread; int loop=5; int i=0; __u64 nlba = 0; __u64 stride= 0x1000; pid = getpid(); rc = ctx_init2(p_ctx, dev, DK_AF_ASSIGN_AFU, devno); CHECK_RC(rc, "Context init failed"); //thread to handle AFU interrupt & events pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); if ( LUN_DIRECT == lun_type) { rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); if (long_run_enable) stride=0x100; rc = do_io(p_ctx, stride); } else { rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL); CHECK_RC(rc, "create LUN_VIRTUAL failed"); nlba = chunk * p_ctx->chunk_size; rc = vlun_resize(p_ctx, nlba); if (rc == 28) { fprintf(stderr, "%d:Requested was more..try with half now...\n",pid); nlba = nlba/2; rc = vlun_resize(p_ctx, nlba); if (rc == 28) { fprintf(stderr, "%d: No space left.. terminate this context..\n",pid); return 0; } } CHECK_RC(rc, "vlun_resize failed"); if (long_run_enable) { stride=0x1; //loop=20; } while (i++<loop) { if (long_run_enable) printf("%d:IO loop %d(%d) started....\n",pid,i,loop); rc = do_io(p_ctx, stride); if (rc) break; } } usleep(1000); //let all process do io pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
int test_fc_port_reset_plun() { int rc; struct ctx myctx; struct ctx *p_ctx = &myctx; pthread_t thread; __u64 stride= 0x100; int ioCounter=0; pid = getpid(); rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); //thread to handle AFU interrupt & events pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); //for PLUN 2nd argument(lba_size) would be ignored rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); CHECK_RC(rc, "failed compare_size"); debug("-- Going to start IO.Please do chportfc -reset <pnum> at texan --\n"); do { rc = do_io(p_ctx, stride); if (rc !=0 ) { debug("rc=%d,ioCounter=%d,IO failed..... \n",rc,ioCounter); if ( ioCounter==1 ) { debug("rc=%d, Going to verify.... \n",rc); p_ctx->flags=DK_VF_LUN_RESET; #ifdef _AIX p_ctx->hint = DK_HINT_SENSE; #else p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; #endif rc = ioctl_dk_capi_verify(p_ctx); CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); } else { if (ioCounter > 1) { rc=-1; // IO failed third time break; } } } else { debug("rc=%d,IO succeeded \n",rc); g_error=0; } ioCounter++; rc|=g_error; sleep(3); } while ( rc !=0); debug("rc=%d,g_error=%d\n",rc,g_error); if ( ioCounter <= 1) { debug("WARNING: Test case not excuted properly... Please rerun\n"); rc =255; } pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
int ioctl_7_1_196() { int rc,i,j; struct ctx myctx[21],myctx_1, myctx_2; struct ctx *p_ctx[21],*p_ctx_1,*p_ctx_2; __u64 stride=0x1000,st_lba=0; pthread_t thread[20]; struct flash_disk disks[MAX_FDISK]; char disk1[30]; char disk2[30]; int cfdisk = MAX_FDISK; pid = getpid(); cfdisk = get_flash_disks(disks, FDISKS_SAME_ADPTR); //need to check the number of disks if (cfdisk < 2) { fprintf(stderr,"Must have 2 flash disks..\n"); TESTCASE_SKIP("Need disk from same adapter and each disk multipathed"); return 0; } strcpy(disk1,disks[0].dev); strcpy(disk2,disks[1].dev); // creating first context for (i=0;i<21;i++) { p_ctx[i]=&myctx[i]; } p_ctx_1=&myctx_1; p_ctx_2=&myctx_2; debug("1ST PROCEDURE\n"); // using p_ctx[[0] for LUN direct for firect disk /* rc = ctx_init2(p_ctx[0], disks[0].dev, DK_AF_ASSIGN_AFU, disks[0].devno[0]); pthread_create(&thread[0], NULL, ctx_rrq_rx, p_ctx[0]); */ /* rc = create_resource(p_ctx[0], 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); */ // creating another 19 context LUN VIRTUAL for ( i=2;i<21;i++) { sleep(2); rc = ctx_init2(p_ctx[i], disks[1].dev, DK_AF_ASSIGN_AFU, disks[1].devno[0]); rc=create_resource(p_ctx[i], p_ctx[i]->chunk_size, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL); } // do context reuse for direct LUN strcpy(p_ctx[0]->dev,disks[0].dev); strcpy(p_ctx[1]->dev,disks[1].dev); p_ctx[0]->fd = open_dev(disks[0].dev, O_RDWR); if (p_ctx[0]->fd < 0) { fprintf(stderr, "open() failed: device %s, errno %d\n", disks[0].dev, errno); g_error = -1; return -1; } p_ctx[1]->fd = open_dev(disks[1].dev, O_RDWR); //Hoping to open second disk if (p_ctx[1]->fd < 0) { fprintf(stderr, "open() failed: device %s, errno %d\n", disks[1].dev, errno); g_error = -1; } #ifdef _AIX rc = ioctl_dk_capi_query_path(p_ctx[0]); CHECK_RC(rc, "DK_CAPI_QUERY_PATH failed"); #else //TBD for linux #endif p_ctx[0]->work.num_interrupts = p_ctx[1]->work.num_interrupts = 4; rc=ioctl_dk_capi_attach_reuse(p_ctx[0],p_ctx[1],LUN_DIRECT); // CHECK_RC(rc, 
"DK_CAPI_ATTACH with reuse flag failed"); if ( rc != 0 ) { fprintf(stderr,"LUN DIRECT got attached to new disk with VLUN, should have succeeded"); return rc; } // initiate I/O on all the LUNs for (i=2;i<21;i++) { pthread_create(&thread[i], NULL, ctx_rrq_rx, p_ctx[i]); rc = do_io(p_ctx[i], stride); } if ( rc != 0 ) { fprintf(stderr,"io on some LUN failed"); return rc; } /* using a goto-label removes the compile warning (-O3 issue) */ i=2; for_loop: pthread_cancel(thread[i]); close_res(p_ctx[i]); if (++i < 21) {goto for_loop;} ctx_close(p_ctx[2]); debug("2nd PROCEDURE\n"); // procedure 2 of the same case debug("%d: ........Phase 1 done.. Starting 2nd Phase........\n",getpid()); memset(p_ctx_1, 0, sizeof(struct ctx)); memset(p_ctx_2, 0, sizeof(struct ctx)); // open the first flash disk in write mode and create a DIRECT LUN // restoring from backup strcpy(disks[0].dev,disk1); p_ctx_1->fd = open_dev(disks[0].dev, O_WRONLY); if (p_ctx_1->fd < 0) { fprintf(stderr, "open() failed: device %s, errno %d\n", disks[0].dev, errno); return -1; } rc = ctx_init2(p_ctx_1, disks[0].dev, DK_AF_ASSIGN_AFU, disks[0].devno[0]); pthread_create(&thread[0], NULL, ctx_rrq_rx, p_ctx_1); CHECK_RC(rc, "create context failed"); rc = create_resource(p_ctx_1, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); // open the same flash disk in read mode again. 
p_ctx_2->fd = open_dev(disks[0].dev, O_RDONLY); if (p_ctx_2->fd < 0) { fprintf(stderr, "open() failed: device %s, errno %d\n", disks[0].dev, errno); return -1; } rc = ctx_init2(p_ctx_2, disks[0].dev, DK_AF_ASSIGN_AFU, disks[0].devno[0]); pthread_create(&thread[1], NULL, ctx_rrq_rx, p_ctx_2); CHECK_RC(rc, "create context failed"); rc = create_resource(p_ctx_2, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); // now write to the disk and then read for (st_lba = 0; st_lba <= p_ctx_1->last_lba; st_lba += (NUM_CMDS*stride)) { rc = send_write(p_ctx_1, st_lba, stride, pid); CHECK_RC(rc, "send_write failed"); rc = send_read(p_ctx_2, st_lba, stride); CHECK_RC(rc, "send_read failed"); /*if (rc !=0 ) { rc = rw_cmp_buf(p_ctx_1, st_lba); if (rc != 0) { fprintf(stderr,"buf cmp failed for lba 0x%lX,rc =%d\n",st_lba,rc); break; } }*/ } if ( rc != 0 ) return rc; for (i=0;i<2;i++) { pthread_cancel(thread[i]); } //close_res(p_ctx_1); ctx_close(p_ctx_1); //close_res(p_ctx_2); ctx_close(p_ctx_2); debug("3rd PROCEDURE\n"); debug("%d: ........Phase 2 done.. Starting 3rd Phase........\n",getpid()); // case 3 of the same case // creating multiple process for LUN_DIRECT creation. for (j=0;j<long_run;j++) { for (i=0; i<20;i++) { if ( 0 == fork()) { rc = ctx_init(p_ctx[i]); CHECK_RC_EXIT(rc, "Context init failed"); // CHECK_RC(rc, "Context init failed"); //thread to handle AFU interrupt & events rc = create_resource(p_ctx[i], 0, DK_UDF_ASSIGN_PATH , LUN_DIRECT); CHECK_RC_EXIT(rc, "create LUN_DIRECT failed"); // do io on context pthread_create(&thread[i], NULL, ctx_rrq_rx, p_ctx[i]); stride=0x1000; sleep(2); //do_io(p_ctx[i], stride); pthread_cancel(thread[i]); close_res(p_ctx[i]); exit(rc); } } wait4all(); } return 0; }