/*
 * Append localization resources for module jar 'file' to 'farray'.
 *
 * If 'rlocale' names a non-empty language, look for companion
 * "<base>_l10n.jar" files under locale-specific subdirectories next to the
 * module jar, ordered most- to least-specific
 * (<language>_<territory>@<modifier>, <language>_<territory>, <language>),
 * adding a resource for each one that exists.  The base (non-localized)
 * resource for 'file' itself is always appended last.
 *
 * NOTE(review): when the jar path contains no '/' separator we return early
 * WITHOUT appending the base resource — confirm this is intentional.
 */
static void
add_localized(rad_locale_t *rlocale, const char *pname, adr_data_t *farray,
    char *file)
{
	if (rlocale != NULL && rlocale->language != NULL &&
	    strlen(rlocale->language)) {
		/* Work on a copy; we carve it into directory + basename. */
		char path[PATH_MAX + 1];
		(void) strlcpy(path, file, PATH_MAX);
		char *ext = strrchr(path, '.');
		if (ext != NULL && strcmp(ext, ".jar") == 0) {
			*ext = '\0';	/* drop ".jar" suffix */
			char *base = strrchr(path, '/');
			if (base == NULL) {
				return;
			}
			*base++ = '\0';	/* split into dir (path) + base name */
			char *fmt[] = {NULL, NULL, NULL};

			/*
			 * Use a ResourceBundle.getBundle-like algorithm -
			 * <language>[_<territory>[@<modifier>]] - and order
			 * from most- to least-specific.
			 */
			fmt[2] = "%s/locale/%s/%5$s_l10n.jar";
			if (rlocale->territory != NULL) {
				fmt[1] = "%s/locale/%s_%s/%5$s_l10n.jar";
				if (rlocale->modifier != NULL) {
					fmt[0] = "%s/locale/%s_%s@%s/"
					    "%5$s_l10n.jar";
				}
			}

			char l10njar[PATH_MAX];
			for (int i = 0; i < RAD_COUNT(fmt); i++) {
				if (fmt[i] == NULL) {
					continue;
				}

				/*
				 * The formats use the POSIX %5$s positional
				 * conversion so each variant can pull the
				 * base name while consuming a different
				 * number of locale components.
				 */
				/* LINTED: E_SEC_PRINTF_VAR_FMT */
				(void) snprintf(l10njar, RAD_COUNT(l10njar),
				    fmt[i], path, rlocale->language,
				    rlocale->territory, rlocale->modifier,
				    base);
				if (access(l10njar, F_OK) == 0) {
					(void) adr_array_add(farray,
					    create_resource(rlocale, pname,
					    l10njar));
				}
			}
		}
	}
	/* Always add the non-localized module jar itself. */
	(void) adr_array_add(farray, create_resource(rlocale, pname, file));
}
/*
 * Fork MAX_OPENS child processes, each opening its own context on the same
 * flash disk, to exercise the per-LUN context limit.
 *
 * 'cmd' selects what each child does after attach (values inferred from the
 * branches below — confirm against the test driver):
 *   1 - attach only (no resource created, so close_res is skipped)
 *   2 - create a virtual LUN
 *   3 - create a direct LUN
 *   4 - create a virtual LUN and drive I/O through it
 *
 * Returns the aggregated child exit status from wait4all().
 */
int max_ctx_on_plun(int cmd)
{
    int i;
    int rc = 0;
    struct ctx myctx;
    struct ctx *p_ctx=&myctx;
    pid = getpid();
    pthread_t thread;
    int max_p = MAX_OPENS;
    for (i=0; i<max_p;i++) {
        if (0==fork()) {
            //child process
            pid = getpid();
            debug("%d: ......process %d created...\n",pid,i);
            memset(p_ctx, 0, sizeof(myctx));
            strcpy(p_ctx->dev, cflash_path);
            if ((p_ctx->fd = open_dev(p_ctx->dev, O_RDWR)) < 0) {
                fprintf(stderr,"open failed %s, errno %d\n",cflash_path, errno);
                exit(rc);
            }
#ifdef _AIX
            rc |= ioctl_dk_capi_query_path(p_ctx);
            rc|=ctx_init_internal(p_ctx, 0, p_ctx->devno);
#else
            rc|=ctx_init_internal(p_ctx, 0x2, p_ctx->devno);
#endif
            if (2 == cmd)
                rc |=create_resource(p_ctx,0,0,LUN_VIRTUAL);
            if (3 == cmd)
                rc |=create_resource(p_ctx,0,0,LUN_DIRECT);
            if (4 == cmd) {
                //do io all vluns created on path_id_mask
                pthread_create(&thread, NULL,ctx_rrq_rx,p_ctx);
                rc |= create_resource(p_ctx,p_ctx->chunk_size,0,LUN_VIRTUAL);
                rc |= do_io(p_ctx,0x10);
                pthread_cancel(thread);
            }
            sleep(10); //lets all context get created
            if ( 1 != cmd )
                rc|=close_res(p_ctx);
            rc|=ctx_close(p_ctx);
            debug("%d:.exiting with rc=%d\n",pid,rc);
            exit(rc);   /* child never returns to the fork loop */
        }
    }
    /* parent: reap every child and OR their exit codes together */
    rc=wait4all();
    return rc;
}
/* make a copy of a resource, initialize the target resource's reference
   count to 1. if the target resource already exists, then the reference
   count is incremented but the contents are overwritten. */
F8RES_API int copy_resource( const f8_uuid * s, const f8_uuid * t )
{
    F8_RESOURCE * r;
    /* source must exist */
    if(query_resource(s) < 0){
        return 0;
    }
    r = _get_res(t);
    if(r){
        /* target already exists: bump its refcount.
           NOTE(review): the old items are NOT freed here (call below is
           commented out) — confirm enum_res_items/_cp_item overwrites
           rather than duplicates existing keys. */
        // _free_items(r->pItems);
        r->refcount++;
    }else{
        create_resource(t);
    }
    /* re-fetch: create_resource may have failed under low memory */
    r = _get_res(t);
    if(!r){
        return F8_LOW_MEMORY;
    }
    /* copy every item of s into t via the _cp_item callback */
    return enum_res_items(s, _cp_item, (__int)t);
}
int test_spio_plun() { int rc; struct ctx myctx; struct ctx *p_ctx = &myctx; pthread_t thread; __u64 stride= 0x10000; pid = getpid(); rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); //thread to handle AFU interrupt & events pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); //for PLUN 2nd argument(lba_size) would be ignored rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); CHECK_RC(rc, "failed compare_size"); rc = do_io(p_ctx, stride); pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
/*
 * Repeatedly attach a context, create (and for vluns, resize) a resource,
 * then detach — a churn test for the attach/detach path.
 *
 * Loops 20 times by default, 100000 when the LONG_RUN environment variable
 * is set.  Returns 0 (CHECK_RC bails out on any step failure).
 */
int do_attach_detach(char *dev, dev64_t devno, __u16 lun_type)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunk = 20;
    __u64 nlba;
    int count = 20;
    char *str = getenv("LONG_RUN");
    if (str != NULL) {
        count = 100000;
        printf("LONG_RUN enabled...loop=%d\n",count);
        fflush(stdout);
    }
    pid = getpid();
    while (count-- >0) {
        rc = ctx_init2(p_ctx, dev, DK_AF_ASSIGN_AFU, devno);
        CHECK_RC(rc, "Context init failed");
        if (LUN_VIRTUAL == lun_type) {
            /* random (possibly 0) initial size, then a random resize */
            chunk = rand()%16;
            //create 0 vlun size & later call resize ioctl
            rc = create_resource(p_ctx, chunk, DK_UVF_ALL_PATHS, lun_type);
            CHECK_RC(rc, "create LUN_VIRTUAL failed");
            chunk = rand()%32;
            nlba = chunk * p_ctx->chunk_size;
            rc = vlun_resize(p_ctx, nlba);
            CHECK_RC(rc, "vlun_resize failed");
        } else {
            rc = create_resource(p_ctx,0, DK_UDF_ASSIGN_PATH, lun_type);
            CHECK_RC(rc, "create LUN_DIRECT failed");
        }
        close_res(p_ctx);
        ctx_close(p_ctx);
        /* progress marker every 500 iterations */
        if (count%500 == 0)
            printf("%d: loop remains....\n",count);
        fflush(stdout);
    }
    return 0;
}
/* associate a buffer with a guid and a key
   the old buffer will be freed automatically.

   id     - resource guid; created on demand when bForce is set
   buffer - data to copy in (must be NUL-terminated when length == -1)
   length - byte count, or -1 to take strlen(buffer)+1
   key    - item key within the resource
   bForce - create the resource if it does not exist yet

   Returns __true on success, __false on lookup/allocation failure. */
F8RES_API __bool set_res_buf_ex( const f8_uuid * id, const void * buffer,
    int length, const char * key, __bool bForce )
{
    F8_RESOURCE * res;
    F8_RESITEM * item;
    res = _get_res(id);
    if(!res && bForce){
        create_resource(id);
        res = _get_res(id);
    }
    if(!res){
        assert(0);
        return __false;
    }
    item = _get_item(res, key);
    if(!item){
        item = _new_item(res, key);
    }
    if(!item){
        return __false;
    }
    if(length == -1){
        length = strlen((const char*)buffer) + 1;
    }
    /* caller re-used the very buffer we already own: just record length */
    if(item->buffer == buffer){
        item->length = length;
        return __true;
    }
    if(item->buffer){
        __free__(item->buffer);
    }
    if(length){
        item->buffer = __malloc__(length);
        if(!item->buffer){
            /* BUGFIX: previously item->length was set before this check,
               leaving the item claiming 'length' bytes with a NULL buffer
               after an allocation failure.  Keep the item consistent. */
            item->length = 0;
            return __false;
        }
        item->length = length;
        memcpy(item->buffer, buffer, length);
    }else{
        /* zero length clears the item */
        item->buffer = 0;
        item->length = 0;
    }
    return __true;
}
// TODO String/TLV format lwm2m_resource *parse_resource(lwm2m_object *object, int resource_id, char *message, int message_len) { lwm2m_resource *resource = create_resource(object, resource_id); if (resource->multiple) { resource->instances = parse_multiple_resource(object, resource_id, message, message_len); } else { lwm2m_value value = parse_value_text(message, message_len, resource->type); __set_value(resource, &value, message_len); } return resource; }
/*
 * Bring up the kproxy runtime: CRT leak tracking, kernel, scratch buffer,
 * TCP adapter, root section resources, shell, block library, clipboard,
 * network server and the vbus connection.
 *
 * leakDetect - allocation ordinal passed to _CrtSetBreakAlloc (0 = off).
 * Always returns __true.
 */
KPROXY_API f8_bool kproxy_init(int leakDetect)
{
    ITcpAdapter *a;
    _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
    _CrtSetBreakAlloc(leakDetect);
    /* initialize and start kernel */
    ex_init(); /* KERN_F_ENABLE_INPUT */
    x_buffer = (char *)__malloc__(LARGE_BUF_SIZE);
    x_buf_len = LARGE_BUF_SIZE;
    b_ShellExit = __false;
    __new__(ITcpAdapter, a);
    proxy_adapter = __ucast__(ITcpAdapter, IKAdapter, a);
    sections = ke_get_blk(proxy_adapter->kernel, 0);
    assert(sections->uuid == BLK_ID_ROOT);
    /*
     * BUGFIX: these three calls had been corrupted by an encoding
     * round-trip that turned the "&sect" prefix of "&sections" into the
     * '§' character ("&sect;" is the HTML entity).  Restored the
     * address-of expressions.
     */
    create_resource(&sections->uuid);
    create_f8_uuid(&sections->h.uuid);
    create_resource(&sections->h.uuid);
    memset(fileName, 0 , sizeof(fileName));
    g_shell = shell_create(commands);
    reset_reg_map();
    init_blklib();
    init_clipboard();
    g_bDirty = __false;
    init_network(RTK_INIT_AS_SERVER, 0);
    vbus = connect_vbus(0xf8, VBUS_CONNECT_AS_SERVER, sr, 0);
    return __true;
}
/**
 * Acquire a resource from the list, blocking (up to reslist->timeout when
 * set) while the list is at its hard maximum with nothing idle.  On
 * APR_SUCCESS *resource holds the caller-owned opaque resource pointer.
 * The list lock is held for the whole operation and released on every
 * return path.
 */
APU_DECLARE(apr_status_t) apr_reslist_acquire(apr_reslist_t *reslist,
                                              void **resource)
{
    apr_status_t rv;
    apr_res_t *res;
    apr_thread_mutex_lock(reslist->listlock);
    /* If there are idle resources on the available list, use
     * them right away. */
    if (reslist->nidle > 0) {
        /* Pop off the first resource */
        res = pop_resource(reslist);
        *resource = res->opaque;
        free_container(reslist, res);
        apr_thread_mutex_unlock(reslist->listlock);
        return APR_SUCCESS;
    }
    /* If we've hit our max, block until we're allowed to create
     * a new one, or something becomes free. */
    else while (reslist->ntotal >= reslist->hmax
                && reslist->nidle <= 0) {
        if (reslist->timeout) {
            if ((rv = apr_thread_cond_timedwait(reslist->avail,
                reslist->listlock, reslist->timeout)) != APR_SUCCESS) {
                apr_thread_mutex_unlock(reslist->listlock);
                return rv;
            }
        }
        else
            /* no timeout configured: wait indefinitely for a signal */
            apr_thread_cond_wait(reslist->avail, reslist->listlock);
    }
    /* If we popped out of the loop, first try to see if there
     * are new resources available for immediate use. */
    if (reslist->nidle > 0) {
        res = pop_resource(reslist);
        *resource = res->opaque;
        free_container(reslist, res);
        apr_thread_mutex_unlock(reslist->listlock);
        return APR_SUCCESS;
    }
    /* Otherwise the reason we dropped out of the loop
     * was because there is a new slot available, so create
     * a resource to fill the slot and use it. */
    else {
        rv = create_resource(reslist, &res);
        if (rv == APR_SUCCESS) {
            reslist->ntotal++;
            *resource = res->opaque;
        }
        /* the container is recycled in both the success and failure case;
         * only the opaque payload survives */
        free_container(reslist, res);
        apr_thread_mutex_unlock(reslist->listlock);
        return rv;
    }
}
// 7.1.217 : create two context for same flash disks shared between 2 adapters int test_cfdisk_ctxs_diff_devno() { int nDisk; int rc=0; struct flash_disk cfDisk[2]; struct ctx myctx1, myctx2; struct ctx *p_ctx1 = &myctx1; struct ctx *p_ctx2 = &myctx2; pid = getpid(); nDisk = get_flash_disks(cfDisk, FDISKS_DIFF_ADPTR); if (nDisk < 2) { fprintf(stderr,"Failed to find 2 flash disks from diff adapter..\n"); return -1; } // On AIX both dev will have same name // On Linux both dev will have diff name rc = ctx_init2(p_ctx1, cfDisk[0].dev, DK_AF_ASSIGN_AFU, cfDisk[0].devno[0]); CHECK_RC(rc, "p_ctx1 Context init failed"); rc = ctx_init2(p_ctx2, cfDisk[1].dev, DK_AF_ASSIGN_AFU, cfDisk[1].devno[0]); CHECK_RC(rc, "p_ctx2 Context init failed"); rc = create_resource(p_ctx1, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT for p_ctx1 failed"); rc = create_resource(p_ctx2, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT for p_ctx2 failed"); cleanup(p_ctx1, -1); cleanup(p_ctx2, -1); return 0; }
/*
 * Drive progressively larger transfer sizes (4KB .. 16MB) through a direct
 * LUN, touching the last two chunks of the disk (or the whole disk under
 * LONG_RUN).  Stops at the first failing size and returns its rc.
 */
int test_large_transfer()
{
    int rc;
    struct ctx my_ctx;
    struct ctx *p_ctx = &my_ctx;
    pthread_t thread;
    struct rwlargebuf rwbuf;
    __u64 chunk=2; // do io on last 2 chunks on a plun
    __u64 buf_size[] = { 0x1000,    //4KB
                         0x4000,    //16KB
                         0x10000,   //64KB
                         0x40000,   //256KB
                         0x800000,  //8MB
                         0x1000000 }; //16MB
    int i;
    //Large trasnfer size is for PLUN not Vluns(4K only) as per Jim
    pid = getpid();
#ifdef _AIX
    /* lift data/stack/rss limits so the 16MB buffers can be pinned */
    rc = setRUnlimited();
    CHECK_RC(rc, "setRUnlimited failed");
#endif
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    rc = create_resource(p_ctx,0,DK_UDF_ASSIGN_PATH,LUN_DIRECT);
    CHECK_RC(rc, "create LUN_DIRECT failed");
    /* start LBA so the I/O covers exactly the final 'chunk' chunks */
    p_ctx->st_lba= p_ctx->last_lba +1 -(chunk*p_ctx->chunk_size);
    if (long_run_enable)
        p_ctx->st_lba=0; //let do IO on complete plun
    for (i=0;i< sizeof(buf_size)/sizeof(__u64);i++) {
        rc = allocate_buf(&rwbuf, buf_size[i]);
        CHECK_RC(rc, "memory allocation failed");
        printf("%d: do large io size=0X%"PRIX64"\n",pid, buf_size[i]);
        rc = do_large_io(p_ctx, &rwbuf, buf_size[i]);
        deallocate_buf(&rwbuf);
        if (rc)
            break; //get out from here
    }
    pthread_cancel(thread);
    close_res(p_ctx);
    ctx_close(p_ctx);
    return rc;
}
/// Return the resource for @p resource_infos, serving it from the cache
/// when possible; otherwise create it, cache it (only on success) and
/// return it — which may be null if creation failed.
ResourcePtr ResourceProvider::get( const ResourceInfo& resource_infos )
{
    ResourcePtr cached = find( resource_infos );
    if( cached )
        return cached;

    ResourcePtr created = create_resource( resource_infos );
    if( created )
        add_resource( resource_infos, created );
    return created;
}
// TODO in multiple resource map there should be map <ID, lwm2m_resource*> list *parse_multiple_resource(lwm2m_object *object, int resource_id, char *message, int message_len) { list *resources = list_new(); tlv_header resource_header; char *curr = message; while (curr < message + message_len) { curr = parse_tlv_header(curr, &resource_header); lwm2m_resource *resource_instance = create_resource(object, resource_id); resource_instance->id = resource_header.id; lwm2m_value value = parse_value(curr, resource_header.length, resource_instance->type); __set_value(resource_instance, &value, resource_header.length); ladd(resources, resource_instance->id, resource_instance); curr = curr + resource_header.length; } return resources; }
/* dispatcher function. Uses the above functions to run.
 *
 * Main simulation loop: initializes I/O resources and the memory pool
 * (carving out a fixed real-time region), then ticks once per second,
 * moving PCBs through the real-time / user / feedback queues until every
 * queue is drained and no process is running.  Frees all memory and
 * resources on exit.
 */
void dispatcher(PcbPtr queue)
{
    clock_time = 0;
    input_queue = queue;
    io_resources = create_resource(PRINTERS,SCANNERS,MODEMS,CDS);
    memory = mabCreate(REAL_TIME_MEMORY+USER_TIME_MEMORY);
    memory = memAlloc(memory,REAL_TIME_MEMORY); // allocate memory for real time
    memory->id = 999; // 999 is for real time
    while (input_queue || user_queue || realtime_queue || current_process ||
           p1_queue || p2_queue || p3_queue) {
        // there are items in the queues or a process is running
        enqueue_user_real_queues(); // add items to user queue and real time queue
        enqueue_roundrobin(); // add items to feedback queues if memory can be allocated
        current_process = running_processes(); //check running process and decrement time / suspend
        start_process(); // start next process in RR queue
        sleep(1);       // one simulated tick == one wall-clock second
        clock_time = clock_time+1;
    }
    memFree_all(memory); // free any remaining memory
    destroy_resource(io_resources); // free memory for resources
}
// creating thread for creation VLUN or PLUN void *create_lun1(void *arg ) { struct ctx *p_ctx = (struct ctx *)arg; int rc; __u64 stride=0x8; rc = create_resource(p_ctx, p_ctx->lun_size, DK_UDF_ASSIGN_PATH, LUN_DIRECT); if ( rc == 0 ) { rc = do_io(p_ctx, stride); if ( rc !=0 ) { fprintf(stderr,"io failed on attached LUN\n"); //TBD Fix this return 2; return NULL; } } //TBD Fix this return 1; //return 1; return NULL; }
int test_large_trnsfr_boundary() { int rc; struct ctx my_ctx; struct ctx *p_ctx = &my_ctx; pthread_t thread; struct rwlargebuf rwbuf; __u64 buf_size = 0x1000000; //16MB __u64 chunk = 10; pid = getpid(); #ifdef _AIX system("ulimit -d unlimited"); system("ulimit -s unlimited"); system("ulimit -m unlimited"); #endif rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); //do RW last cmd with crossed LBA boundary //i.e. last_lba size is 0x100; //do send rw with 0x10 & cross limit of 0x100 rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); rc = allocate_buf(&rwbuf, buf_size); CHECK_RC(rc, "memory allocation failed"); //to make sure last cmd rw beyond boundary p_ctx->st_lba = p_ctx->last_lba - (chunk * p_ctx->chunk_size); p_ctx->st_lba = p_ctx->st_lba +20 ; rc = do_large_io(p_ctx, &rwbuf, buf_size); deallocate_buf(&rwbuf); pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
int main(int argc, char **argv) { glfwSetErrorCallback(error_cb); glfwInit(); glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); glfwWindowHint(GLFW_RESIZABLE, GL_FALSE); GLFWwindow *win = glfwCreateWindow(500, 500, __FILE__, NULL, NULL); glfwMakeContextCurrent(win); glfwSetKeyCallback(win, key_cb); glewExperimental = GL_TRUE; glewInit(); glViewport(0, 0, 500, 500); glClearColor(0.0f, 0.0f, 0.0f, 0.0f); glClear(GL_COLOR_BUFFER_BIT); compile_shader(); create_resource(); while (!glfwWindowShouldClose(win)) { glfwPollEvents(); glDrawArrays(GL_TRIANGLES, 0, 3); glfwSwapBuffers(win); } glfwTerminate(); return 0; }
/* move_resource will decrement usage count of the source resource, note
   this will not necessary cause the source resource to be deleted, only
   if its usage count reaches zero.

   Renames/moves resource 's' to 't'.  Fails with F8_NAME_DUPLICATE if 't'
   already exists and F8_OBJECT_NOT_FOUND if 's' does not. */
F8RES_API int move_resource( const f8_uuid * s, const f8_uuid * t )
{
    int r;
    F8_RESOURCE *tr, *sr;
    /* target must not already exist */
    if(query_resource(t) >= 0){
        return F8_NAME_DUPLICATE;
    }
    sr = _get_res(s);
    if(!sr){
        return F8_OBJECT_NOT_FOUND;
    }
    if(!create_resource(t)){
        return F8_LOW_MEMORY;
    }
    if(sr->refcount == 1){
        /* a little optimization, if the reference count of source is 1,
           then no actual copy is made, instead, we move the contents of
           s to t, and detach s from its contents. */
        tr = _get_res(t);
        delete tr->pItems;          /* drop t's freshly-created empty list */
        tr->pItems = _get_res(s)->pItems;
        sr->pItems = 0;             /* detach so s's items aren't freed */
        _free_resource(sr);
        r = F8_SUCCESS;
    }else{
        /* shared source: full item-by-item copy */
        r = copy_resource(s, t);
    }
    /* this will decrement the reference count, and remove the resource
       entry if necessary */
    unload_resource(s);
    return r;
}
/** Returns map of resources **/
/* Walk the TLV stream of an object instance, materializing one resource
   per TLV record; MULTIPLE_RESOURCE_TYPE records recurse into the
   multiple-resource parser, everything else is decoded as a plain value. */
list *parse_instance(lwm2m_object *object, char *message, int message_len) {
    list *result = list_new();
    char *cursor = message;
    char *end = message + message_len;

    while (cursor < end) {
        tlv_header header;
        cursor = parse_tlv_header(cursor, &header);

        lwm2m_resource *res = create_resource(object, header.id);
        if (header.type != MULTIPLE_RESOURCE_TYPE) {
            lwm2m_value value = parse_value(cursor, header.length, res->type);
            __set_value(res, &value, header.length);
        } else {
            res->instances = parse_multiple_resource(object, res->id,
                                                     cursor, header.length);
            res->multiple = true;
        }
        ladd(result, res->id, res);
        cursor += header.length;
    }
    return result;
}
/*
 * Worker thread: under the shared mutex, create a virtual LUN on the
 * (shared) context and run I/O against it; later, re-acquire the mutex and
 * close that same resource.
 *
 * The handles are snapshotted right after create_resource and restored
 * into p_ctx before use because other threads share p_ctx and may have
 * overwritten its handle fields in between.  Failures are reported through
 * the global g_error (set to -1).
 */
void* res_thread(void *arg)
{
    int rc;
    struct ctx *p_ctx = (struct ctx *)arg;
    res_hndl_t res_hndl;
    __u64 rsrc_handle;
    __u64 stride = 0x1000;
    pthread_mutex_lock(&mutex);
    rc = create_resource(p_ctx, p_ctx->lun_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
    /* snapshot this thread's handles before releasing the context */
    res_hndl = p_ctx->res_hndl;
    rsrc_handle = p_ctx->rsrc_handle;
    if (rc) {
        g_error = -1;
        pthread_mutex_unlock(&mutex);
        return NULL;
    }
    /* restore our handle in case a sibling thread clobbered it */
    p_ctx->res_hndl = res_hndl;
    rc = do_io(p_ctx, stride);
    if (rc) {
        g_error = -1;
        pthread_mutex_unlock(&mutex);
        return NULL;
    }
    pthread_mutex_unlock(&mutex);
    sleep(1);
    // Closing the resource after IO done
    sleep(2);
    pthread_mutex_lock(&mutex);
    p_ctx->rsrc_handle = rsrc_handle;
    rc = close_res(p_ctx);
    pthread_mutex_unlock(&mutex);
    return 0;
}
/*
 * Create MAX_VLUNS virtual LUNs on a single context (one chunk each),
 * then close them all and the context.  Returns 0 on success.
 */
int max_vlun_on_a_ctx()
{
    int i;
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx=&myctx;
    __u64 vluns[MAX_VLUNS];
    pid = getpid();
    rc=ctx_init(p_ctx);
    /* BUGFIX: ctx_init's return code was silently ignored; every sibling
     * test checks it, and continuing with a failed context would
     * dereference uninitialized state in the loop below. */
    CHECK_RC(rc, "Context init failed\n");
    /* create the maximum number of vluns, remembering each handle */
    for (i=0;i<MAX_VLUNS;i++) {
        rc = create_resource(p_ctx,p_ctx->chunk_size,0,LUN_VIRTUAL);
        CHECK_RC(rc, "create_resource Failed\n");
        vluns[i]=p_ctx->rsrc_handle;
    }
    /* close them all back down */
    for (i=0;i<MAX_VLUNS;i++) {
        p_ctx->rsrc_handle=vluns[i];
        rc=close_res(p_ctx);
        CHECK_RC(rc, "close_res failed\n");
    }
    rc = ctx_close(p_ctx);
    return rc;
}
/*
 * Exercise AFU context-reset recovery: inject a bad effective address and
 * a bad RCB, confirming normal I/O still works after each, then push a
 * 32MB large transfer followed by normal I/O.  Returns the rc of the last
 * failing/succeeding step.
 */
int test_ctx_reset()
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx= &myctx;
    pthread_t thread;
    __u64 buf_size = 0x2000000; //32MB
    __u64 chunk = 10;
    __u64 stride = 0x1000;
    struct rwlargebuf rwbuf;
    int i;
    pid=getpid();
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "ctx_init failed");
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    rc = create_resource(p_ctx,chunk*p_ctx->chunk_size,DK_UVF_ASSIGN_PATH,LUN_VIRTUAL);
    CHECK_RC(rc, "create LUN_VIRTUAL failed");
    //do bad EA
    if (1) {
        debug("%d: ........place bad EA....\n", pid);
        fill_send_write(p_ctx, 0, pid, stride);
        /* corrupt every command's data EA to force an AFU fault */
        for (i = 0; i < NUM_CMDS; i++) {
            p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234;
        }
        bad_address = true;
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
        sleep(1);
        //normal IO
        bad_address = false;
        debug("%d: .........after bad EA, do normal IO....\n", pid);
        rc = do_io(p_ctx, stride);
        CHECK_RC(rc,"Normal IO failed after bad EA");
        //do bad RCB
        debug("%d: .........place bad RCB....\n", pid);
        bad_address = true;
        place_bad_addresses(p_ctx, 1);
        sleep(2);
        //normal IO
        debug("%d: ......after bad RCB, do normal IO....\n", pid);
        bad_address = false;
        rc = do_io(p_ctx, stride);
        CHECK_RC(rc,"Normal IO failed after bad RCB");
#ifdef _AIX
        /* lift resource limits ahead of the 32MB transfer below */
        rc = setRUnlimited();
        CHECK_RC(rc,"setRUnlimited() failed");
#endif
    }
    //do large _transfer
    debug("%d: Do large transfer ....\n", pid);
    rc = allocate_buf(&rwbuf, buf_size);
    CHECK_RC(rc, "memory allocation failed");
    rc = do_large_io(p_ctx, &rwbuf, buf_size);
    deallocate_buf(&rwbuf);
    buf_size = 0x100000; //4k
    rc = allocate_buf(&rwbuf, buf_size);
    CHECK_RC(rc, "memory allocation failed");
    //normal io
    debug("%d: after large transfer,do normal IO ....\n", pid);
    rc = do_io(p_ctx, 0x10000);
    //rc = do_large_io(p_ctx, &rwbuf, buf_size);
    CHECK_RC(rc,"Normal IO failed after large transfer");
    pthread_cancel(thread);
    close_res(p_ctx);
    ctx_close(p_ctx);
    return rc;
}
/*
 * Negative-path IOARCB test: corrupt one field of every queued command
 * according to 'cmd' and verify the AFU's error response.
 *
 * cmd 1-15 corrupt individual RCB fields (opcode, EA, flags, handles,
 * lengths, out-of-range LBAs — see the branches below); cmd >= 100
 * exercise bad RCB/IOASA/HRRQ addresses and cmd-room violations, where
 * the expected outcome is an err_afu_intrpt and rc is set to the cmd
 * number when it fires.
 */
int mc_invalid_ioarcb(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    __u64 chunks=32;
    __u64 actual_size=0;
    __u64 vlba =0;
    __u32 *p_u32;
    __u64 stride;
    __u64 *p_u64;
    pthread_t thread;
    mc_stat_t l_mc_stat;
    int i;
    pid = getpid();
    /* several sub-tests intentionally fault; keep the process alive */
    signal(SIGABRT, sig_handle);
    signal(SIGSEGV, sig_handle);
    rc = mc_init();
    CHECK_RC(rc, "mc_init failed");
    debug("mc_init success :%d\n",rc);
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    pthread_create(&thread,NULL,ctx_rrq_rx, p_ctx);
    if (15 == cmd) {
        //PLBA out of range
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "opening res_hndl");
        actual_size = (p_ctx->last_lba+1)/p_ctx->chunk_size;
    } else {
        p_ctx->flags = DK_UVF_ALL_PATHS;
        rc = create_res(p_ctx);
        CHECK_RC(rc, "opening res_hndl");
        rc = mc_size1(p_ctx,chunks, &actual_size);
        CHECK_RC(rc, "mc_size");
    }
    rc = mc_stat1(p_ctx, &l_mc_stat);
    CHECK_RC(rc, "mc_stat");
    stride = 1 << l_mc_stat.nmask;
    /* last valid virtual LBA of the resource */
    vlba = (actual_size * (1 << l_mc_stat.nmask))-1;
    fill_send_write(p_ctx, vlba, pid, stride);
    /* corrupt every command in the queue per the selected scenario */
    for (i = 0; i < NUM_CMDS; i++) {
        if (1 == cmd) {
            //invalid upcode
            debug("invalid upcode(0xFA) action = %d\n",cmd);
            p_ctx->cmd[i].rcb.cdb[0] = 0xFA;
        } else if (2 == cmd) {
            //EA = NULL
            debug("EA = NULL action = %d\n",cmd);
            p_ctx->cmd[i].rcb.data_ea = (__u64)NULL;
#ifdef _AIX
            bad_address = true;
#endif
        } else if (3 == cmd) {
            //invalid flgas
            p_ctx->cmd[i].rcb.req_flags = SISL_REQ_FLAGS_RES_HNDL;
            p_ctx->cmd[i].rcb.req_flags |= SISL_REQ_FLAGS_HOST_READ;
            debug("invalid flag = 0X%X\n",p_ctx->cmd[i].rcb.req_flags);
        } else if (5 == cmd) {
            //SISL_AFU_RC_RHT_INVALID
            p_ctx->cmd[i].rcb.res_hndl = p_ctx->res_hndl + 2;
        } else if ( 6 == cmd) {
            //SISL_AFU_RC_RHT_OUT_OF_BOUNDS
            p_ctx->cmd[i].rcb.res_hndl = MAX_RES_HANDLE;
        } else if (7 == cmd) {
            //invalid address for page fault
            debug("setting EA = 0x1234 to generate error page fault\n");
            p_ctx->cmd[i].rcb.data_ea = (__u64)0x1234;
#ifdef _AIX
            bad_address = true;
#endif
        } else if (8 == cmd) {
            //invalid ctx_id
            debug("%d: sending invalid ctx id\n", pid);
            p_ctx->cmd[i].rcb.ctx_id = p_ctx->ctx_hndl +10;
        } else if (9 == cmd) {
            //test flag underrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0])/2;
        } else if (10 == cmd) {
            // test flag overrun
            p_ctx->cmd[i].rcb.data_len = sizeof(p_ctx->wbuf[0]);
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, 2);
        } else if (11 == cmd) {
            //rc scsi_rc_check
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, p_ctx->blk_len +1);
        } else if (12 == cmd) {
            //data len 0 in ioarcb
            p_ctx->cmd[i].rcb.data_len = 0;
        } else if (13 == cmd) {
            //NUM BLK to write 0
            p_u32 = (__u32*)&p_ctx->cmd[i].rcb.cdb[10];
            write_32(p_u32, 0);
        } else if ((14 == cmd) || (15 == cmd)) {
            //test out of range LBAs
            p_u64 = (__u64*)&p_ctx->cmd[i].rcb.cdb[2];
            vlba += i+1;
            write_lba(p_u64, vlba);
        }
    }
    //test BAD IOARCB, IOASA & CMD room violation
    if (cmd >= 100) {
        if (100 == cmd) {
            //bad RCB
            place_bad_addresses(p_ctx, 1);
            usleep(1000);
            if (err_afu_intrpt) //cool expected res
                rc = 100;
            else
                rc = -1;
            goto END;
        } else if (101 == cmd) {
            //bad IOASA
            handle_bad_ioasa(p_ctx, pid);
            usleep(1000);
            //sleep sometime to process rcb cmd by AFU
            //And let handle rrq event
            //how to handle error, rrq thread should throw some error
            return -1;
        } else if (102 == cmd) {
            //cmd_room violation
            place_bad_addresses(p_ctx, 3);
            usleep(1000);
#ifdef _AIX
            if (err_afu_intrpt) //cool expected res
                rc = 102;
            else
                rc = -1;
            goto END;
#endif
        } else if (103 == cmd) {
            //bad HRRQ
            place_bad_addresses(p_ctx, 2);
            usleep(1000);
            if (err_afu_intrpt) //cool expected res
                rc = 103;
            else
                rc = -1;
            goto END;
        }
    } else {
        send_cmd(p_ctx);
    }
    rc = wait_resp(p_ctx);
    /* length-corruption scenarios must come back with rc flags set */
    if ( cmd >= 9 && cmd <= 13) {
        if (!rc_flags) {
            if (!dont_displa_err_msg)
                fprintf(stderr, "%d: Expecting rc flags non zero\n", pid);
            rc = -1;
        }
    }
    if (4 == cmd) {
        //invalid fc port & lun id
        debug("invalid fc port(0xFF)&lun id(0X1200), action=%d",cmd);
        fill_send_write(p_ctx, vlba, pid, stride);
        for (i = 0; i < NUM_CMDS; i++) {
            p_ctx->cmd[i].rcb.lun_id = 0x12000;
            p_ctx->cmd[i].rcb.port_sel = 0xff;
        }
        //send_single_cmd(p_ctx);
        send_cmd(p_ctx);
        rc = wait_resp(p_ctx);
    }
#ifdef _AIX
    if ((7 == cmd || 2 == cmd)&& (err_afu_intrpt))
        rc = 7;
#endif
END:
    pthread_cancel(thread);
    close_res(p_ctx);
    //mc_unregister(p_ctx->mc_hndl);
    //xerror:
    ctx_close(p_ctx);
    mc_term();
    return rc;
}
/**
 * Perform routine maintenance on the resource list. This call
 * may instantiate new resources or expire old resources.
 *
 * Holds the list lock throughout: first tops the pool up to 'min'
 * (bounded by 'hmax'), signalling a waiter for each new resource; if
 * nothing was created, expires idle resources beyond 'smax' whose age
 * exceeds 'ttl'.
 */
static apr_status_t reslist_maint(apr_reslist_t *reslist)
{
    apr_time_t now;
    apr_status_t rv;
    apr_res_t *res;
    int created_one = 0;
    apr_thread_mutex_lock(reslist->listlock);
    /* Check if we need to create more resources, and if we are allowed to. */
    while (reslist->nidle < reslist->min && reslist->ntotal < reslist->hmax) {
        /* Create the resource */
        rv = create_resource(reslist, &res);
        if (rv != APR_SUCCESS) {
            free_container(reslist, res);
            apr_thread_mutex_unlock(reslist->listlock);
            return rv;
        }
        /* Add it to the list */
        push_resource(reslist, res);
        /* Update our counters */
        reslist->ntotal++;
        /* If someone is waiting on that guy, wake them up. */
        rv = apr_thread_cond_signal(reslist->avail);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(reslist->listlock);
            return rv;
        }
        created_one++;
    }
    /* We don't need to see if we're over the max if we were under it before */
    if (created_one) {
        apr_thread_mutex_unlock(reslist->listlock);
        return APR_SUCCESS;
    }
    /* Check if we need to expire old resources */
    now = apr_time_now();
    while (reslist->nidle > reslist->smax && reslist->nidle > 0) {
        /* Peek at the last resource in the list */
        res = APR_RING_LAST(&reslist->avail_list);
        /* See if the oldest entry should be expired */
        if (now - res->freed < reslist->ttl) {
            /* If this entry is too young, none of the others
             * will be ready to be expired either, so we are done. */
            break;
        }
        APR_RING_REMOVE(res, link);
        reslist->nidle--;
        reslist->ntotal--;
        rv = destroy_resource(reslist, res);
        free_container(reslist, res);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(reslist->listlock);
            return rv;
        }
    }
    apr_thread_mutex_unlock(reslist->listlock);
    return APR_SUCCESS;
}
/*
 * Attach and drive I/O on either a direct or virtual LUN of the given
 * device.  For vluns, creates a zero-size lun then resizes it to
 * chunk*chunk_size, halving the request (and tolerating full exhaustion,
 * rc==28/ENOSPC) before looping I/O passes.
 */
int test_spio_lun(char *dev, dev64_t devno, __u16 lun_type, __u64 chunk)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    pthread_t thread;
    int loop=5;
    int i=0;
    __u64 nlba = 0;
    __u64 stride= 0x1000;
    pid = getpid();
    rc = ctx_init2(p_ctx, dev, DK_AF_ASSIGN_AFU, devno);
    CHECK_RC(rc, "Context init failed");
    //thread to handle AFU interrupt & events
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    if ( LUN_DIRECT == lun_type) {
        rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT);
        CHECK_RC(rc, "create LUN_DIRECT failed");
        if (long_run_enable)
            stride=0x100;
        rc = do_io(p_ctx, stride);
    } else {
        /* create zero-size vlun, then grow it via resize */
        rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "create LUN_VIRTUAL failed");
        nlba = chunk * p_ctx->chunk_size;
        rc = vlun_resize(p_ctx, nlba);
        if (rc == 28) {
            /* out of space: retry with half, give up gracefully if still full */
            fprintf(stderr, "%d:Requested was more..try with half now...\n",pid);
            nlba = nlba/2;
            rc = vlun_resize(p_ctx, nlba);
            if (rc == 28) {
                fprintf(stderr, "%d: No space left.. terminate this context..\n",pid);
                return 0;
            }
        }
        CHECK_RC(rc, "vlun_resize failed");
        if (long_run_enable) {
            stride=0x1;
            //loop=20;
        }
        while (i++<loop) {
            if (long_run_enable)
                printf("%d:IO loop %d(%d) started....\n",pid,i,loop);
            rc = do_io(p_ctx, stride);
            if (rc)
                break;
        }
    }
    usleep(1000); //let all process do io
    pthread_cancel(thread);
    close_res(p_ctx);
    ctx_close(p_ctx);
    return rc;
}
/*
 * Virtual-LUN attach/IO/resize test.
 *
 * cmd 3 - issue I/O with NO resource created; expect the AFU to fail it.
 * cmd 1 - create a 0-size vlun, verify its reported size, and expect I/O
 *         against it to fail with afu_rc 0x13.
 * else  - create a one-chunk vlun and verify its size.
 * All non-cmd-3 paths then resize to 'chunk' chunks, verify, and run I/O
 * including a shrink-to-zero / regrow cycle.
 */
int test_spio_vlun(int cmd)
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    pthread_t thread;
    __u64 chunk = 0x10;
    __u64 nlba;
    __u64 stride=0x10000;
    pid = getpid();
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    //thread to handle AFU interrupt & events
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    if (3 == cmd) {
        //IO ON NO RES expect AFURC
        p_ctx->last_lba = chunk * p_ctx->chunk_size -1;
        rc = do_io(p_ctx, stride);
        pthread_cancel(thread);
        ctx_close(p_ctx);
        return rc;
    }
    //create 0 vlun size & later call resize ioctl
    if (1 == cmd) {
        //0 size
        debug("%d: create VLUN with 0 size\n", pid);
        rc = create_resource(p_ctx, 0, DK_UVF_ASSIGN_PATH, LUN_VIRTUAL);
        CHECK_RC(rc, "create LUN_VIRTUAL failed");
        /* the platforms report an empty vlun's last_lba differently */
#ifdef _AIX
        rc = compare_size(p_ctx->last_lba, 0);
#else
        rc = compare_size(p_ctx->last_lba, -1);
#endif
        CHECK_RC(rc, "failed compare_size");
        /* pretend there is space so do_io actually issues commands */
        p_ctx->last_lba=0xFFFF;
        rc = do_io(p_ctx,stride);
        if (rc != 0x13 ) {
            CHECK_RC(1,"IO should fail with afu_rc=0x13\n");
        } else {
            fprintf(stderr, "IO failed as expected, don't worry....\n");
            g_error=0;
            rc=0;
        }
    } else {
        nlba = 1 * (p_ctx->chunk_size);
        rc = create_resource(p_ctx, nlba, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
        CHECK_RC(rc, "create LUN_VIRTUAL failed");
        rc = compare_size(p_ctx->last_lba, nlba-1);
        CHECK_RC(rc, "failed compare_size");
    }
    nlba = chunk * (p_ctx->chunk_size);
    rc = vlun_resize(p_ctx, nlba);
    CHECK_RC(rc, "vlun_resize failed");
    rc = compare_size(p_ctx->last_lba, nlba-1);
    CHECK_RC(rc, "failed compare_size");
    //i would like to write/read all lbas
    //stride = p_ctx->blk_len;
    rc |= do_io(p_ctx, stride);
    /* shrink to zero, grow back, and make sure I/O still works */
    rc |= vlun_resize(p_ctx, 0);
    rc |= vlun_resize(p_ctx, nlba);
    rc |= do_io(p_ctx, stride);
    pthread_cancel(thread);
    close_res(p_ctx);
    ctx_close(p_ctx);
    rc |= g_error;
    return rc;
}
/*
 * Manual FC-port-reset test on a virtual LUN: loops I/O while the operator
 * resets the port at the switch ("chportfc -reset").  On the first I/O
 * failure it retries; on the second it runs the DK verify (LUN reset)
 * ioctl; a third failure aborts.  Returns 255 if I/O never failed (the
 * reset was evidently not performed).
 */
int test_fc_port_reset_vlun()
{
    int rc;
    struct ctx myctx;
    struct ctx *p_ctx = &myctx;
    pthread_t thread;
    int ioCounter=0;
    __u64 nlba;
    __u64 stride=0x1;
    pid = getpid();
#ifdef _AIX
    /* AIX: open the device and query paths by hand before attaching */
    memset(p_ctx, 0, sizeof(myctx));
    strcpy(p_ctx->dev, cflash_path);
    if ((p_ctx->fd =open_dev(p_ctx->dev, O_RDWR)) < 0) {
        fprintf(stderr,"open %s failed, errno=%d\n",p_ctx->dev,errno);
        return -1;
    }
    rc = ioctl_dk_capi_query_path(p_ctx);
    CHECK_RC(rc,"dk_capi_query_path failed..\n");
    rc = ctx_init_internal(p_ctx, 0,p_ctx->devno);
#else
    rc = ctx_init(p_ctx);
#endif
    CHECK_RC(rc, "Context init failed");
    //thread to handle AFU interrupt & events
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    nlba = 1 * (p_ctx->chunk_size);
    rc = create_resource(p_ctx, nlba, 0, LUN_VIRTUAL);
    CHECK_RC(rc, "create LUN_VIRTUAL failed");
    rc = compare_size(p_ctx->last_lba, nlba-1);
    CHECK_RC(rc, "failed compare_size");
    debug("-- Going to start IO.Please do chportfc -reset <pnum> at texan --\n");
    debug("rc=%d,g_error=%d\n",rc,g_error);
    do {
        rc = do_io(p_ctx, stride);
        if (rc !=0 ) {
            debug("rc=%d,ioCounter=%d,IO failed..... \n",rc,ioCounter);
            if ( ioCounter==1 ) {
                /* second failure: attempt recovery via verify/LUN reset */
                debug("rc=%d, Going to verify.... \n",rc);
                p_ctx->flags=DK_VF_LUN_RESET;
#ifdef _AIX
                p_ctx->hint = DK_HINT_SENSE;
#else
                p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE;
#endif
                rc = ioctl_dk_capi_verify(p_ctx);
                CHECK_RC(rc, "ioctl_dk_capi_verify failed\n");
            } else {
                if (ioCounter > 1) {
                    rc=-1; // IO failed third time
                    break;
                }
            }
        } else {
            debug("rc=%d,IO succeeded \n",rc);
            g_error=0;
        }
        ioCounter++;
        rc|=g_error;
        sleep(3);
    } while ( rc !=0);
    debug("rc=%d,g_error=%d\n",rc,g_error);
    if ( ioCounter <= 1) {
        /* I/O never failed — the port reset was not actually done */
        debug("WARNING: Test case not excuted properly... Please rerun\n");
        rc =255;
    }
    pthread_cancel(thread);
    close_res(p_ctx);
    ctx_close(p_ctx);
    rc |= g_error;
    return rc;
}
/*
 * EEH test where the context is deliberately NOT recovered: attach, create
 * a vlun, optionally run background I/O (skipped when NO_IO is set in the
 * environment), trigger/poll an EEH event, then issue further ioctls on
 * the dead context (AIX, coordinated over a message queue) or simply
 * re-attach a fresh context (Linux).
 */
int no_recover_and_ioctl()
{
    int rc;
    struct ctx my_ctx;
    struct ctx *p_ctx = &my_ctx;
    //__u64 flags;
    pthread_t thread;
    __u64 chunk = 0x1;
    __u64 stride= 0x1;
    pthread_t ioThreadId;
#ifdef _AIX
    //these are unused on Linux
    int msgid;
    struct mymsgbuf msg_buf;
#endif
    do_io_thread_arg_t ioThreadData;
    do_io_thread_arg_t * p_ioThreadData=&ioThreadData;
    char * noIOP = getenv("NO_IO");
    pid = getpid();
    printf("%d:no_recover_and_ioctl process created...\n",pid);
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    rc = create_resource(p_ctx, chunk *(p_ctx->chunk_size), DK_UVF_ALL_PATHS, LUN_VIRTUAL);
    CHECK_RC(rc, "create LUN_VIRTUAL failed");
    if ( noIOP == NULL ) {
        //thread to handle AFU interrupt & events
        pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
        /* background I/O running while the EEH event hits */
        p_ioThreadData->p_ctx=p_ctx;
        p_ioThreadData->stride=stride;
        p_ioThreadData->loopCount=100;
        rc = pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData);
        CHECK_RC(rc, "do_io_thread() pthread_create failed");
    }
#ifdef _AIX
    rc = do_eeh(p_ctx);
#else
    rc = do_poll_eeh(p_ctx);
#endif
    if ( noIOP == NULL ) {
        pthread_join(ioThreadId, NULL);
    }
    if ( noIOP == NULL )
        pthread_cancel(thread);
#ifdef _AIX
    /* wait for the partner process's go-ahead before poking the dead ctx */
    msgid = msgget(key, IPC_CREAT | 0666);
    if (msgid < 0 ) {
        fprintf(stderr, "%d: msgget() failed before msgsnd()\n", pid);
        return -1;
    }
    if (msgrcv(msgid, &msg_buf, 2, 2, 0) < 0) {
        fprintf(stderr, "%d: msgrcv failed with errno %d\n", pid, errno);
        return -1;
    }
    sleep(1);
    /* ioctls on the unrecovered context: accumulate their rcs */
    rc = create_resource(p_ctx, p_ctx->chunk_size, DK_UVF_ALL_PATHS, LUN_VIRTUAL);
    rc |= vlun_resize(p_ctx, 2*p_ctx->chunk_size);
    rc |= close_res(p_ctx);
    rc |= ctx_close(p_ctx);
#else
    // For the lost context, we will create another new.
    rc = ctx_init(p_ctx);
    CHECK_RC(rc, "Context init failed");
    //thread to handle AFU interrupt & events
    pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
    rc = create_resource(p_ctx, chunk *(p_ctx->chunk_size), DK_UVF_ALL_PATHS, LUN_VIRTUAL);
    pthread_cancel(thread);
#endif
    return rc;
}
int test_fc_port_reset_plun() { int rc; struct ctx myctx; struct ctx *p_ctx = &myctx; pthread_t thread; __u64 stride= 0x100; int ioCounter=0; pid = getpid(); rc = ctx_init(p_ctx); CHECK_RC(rc, "Context init failed"); //thread to handle AFU interrupt & events pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx); //for PLUN 2nd argument(lba_size) would be ignored rc = create_resource(p_ctx, 0, DK_UDF_ASSIGN_PATH, LUN_DIRECT); CHECK_RC(rc, "create LUN_DIRECT failed"); rc = compare_size(p_ctx->last_lba, p_ctx->last_phys_lba); CHECK_RC(rc, "failed compare_size"); debug("-- Going to start IO.Please do chportfc -reset <pnum> at texan --\n"); do { rc = do_io(p_ctx, stride); if (rc !=0 ) { debug("rc=%d,ioCounter=%d,IO failed..... \n",rc,ioCounter); if ( ioCounter==1 ) { debug("rc=%d, Going to verify.... \n",rc); p_ctx->flags=DK_VF_LUN_RESET; #ifdef _AIX p_ctx->hint = DK_HINT_SENSE; #else p_ctx->hint = DK_CXLFLASH_VERIFY_HINT_SENSE; #endif rc = ioctl_dk_capi_verify(p_ctx); CHECK_RC(rc, "ioctl_dk_capi_verify failed\n"); } else { if (ioCounter > 1) { rc=-1; // IO failed third time break; } } } else { debug("rc=%d,IO succeeded \n",rc); g_error=0; } ioCounter++; rc|=g_error; sleep(3); } while ( rc !=0); debug("rc=%d,g_error=%d\n",rc,g_error); if ( ioCounter <= 1) { debug("WARNING: Test case not excuted properly... Please rerun\n"); rc =255; } pthread_cancel(thread); close_res(p_ctx); ctx_close(p_ctx); return rc; }
//int create_res_hndl_afu_reset(char *dev, dev64_t devno, __u64 chunk)
/*
 * create_res_hndl_afu_reset() -- per-process worker for the AFU-reset /
 * EEH context-recovery scenario.
 *
 * Each worker attaches a context and creates a one-chunk virtual LUN.
 * Subsequent behavior depends on the flags:
 *   last == true       : skip EEH handling entirely; jump to the final
 *                        I/O + cleanup path.
 *   do_recover == true : ride through the EEH event with I/O in flight,
 *                        recover the context (ioctl_dk_capi_recover_ctx),
 *                        signal the waiting peer via a SysV message
 *                        (mtype 2), verify, then do I/O and clean up.
 *   do_recover == false: return 100 -- per the note below, a new attach
 *                        is expected to fail while the holding context
 *                        has not exited.
 *
 * Export NO_IO in the environment to skip all I/O threads.
 * Returns 0 on success, 100 on the deliberate no-recover path, and
 * -1/non-zero on failure.
 */
int create_res_hndl_afu_reset(bool do_recover, bool last)
{
    int rc;
    struct ctx my_ctx;
    struct ctx *p_ctx = &my_ctx;
    //int i;
    pthread_t thread;
    __u64 chunk = 0x1;      /* vlun size in chunks */
    __u64 stride= 0x1;
    int msgid;              /* SysV queue used to signal the waiting peer */
    struct mymsgbuf msg_buf;
    pthread_t ioThreadId;
    do_io_thread_arg_t ioThreadData;
    do_io_thread_arg_t * p_ioThreadData=&ioThreadData;
    /* Export NO_IO in the environment to skip the I/O portions. */
    char * noIOP = getenv("NO_IO");

    pid = getpid();
#ifdef _AIX
    memset(p_ctx,0,sizeof(my_ctx));
    strcpy(p_ctx->dev,cflash_path);
    if ((p_ctx->fd = open_dev(p_ctx->dev, O_RDWR)) < 0) {
        fprintf(stderr,"open failed %s, errno %d\n",p_ctx->dev, errno);
        return -1;
    }
    rc = ioctl_dk_capi_query_path(p_ctx);
    CHECK_RC(rc, "ioctl_dk_capi_query_path failed...\n");
    rc = ctx_init_internal(p_ctx, 0, p_ctx->devno);
#else
    rc = ctx_init(p_ctx);
#endif
    CHECK_RC(rc, "Context init failed");

    /* Thread to handle AFU interrupt & events. */
    if ( noIOP == NULL )
        pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);

    /* Create the vlun (resized later via ioctl where applicable). */
    rc = create_resource(p_ctx, chunk * (p_ctx->chunk_size), 0, LUN_VIRTUAL);
    CHECK_RC(rc, "create LUN_VIRTUAL failed");

    /* The last new process sends a message to the waiting process that a
     * new ctx has been created, so it can try to reattach. */
    msgid = msgget(key, IPC_CREAT | 0666);
    if (msgid < 0 ) {
        fprintf(stderr, "%d: msgget() failed before msgsnd()\n", pid);
        return -1;
    }
    memset(&msg_buf, 0, sizeof(struct mymsgbuf));

    if (last) {
        goto end;   /* final worker: no EEH handling, just I/O + cleanup */
    }

    if ( noIOP == NULL ) {
        p_ioThreadData->p_ctx=p_ctx;
        p_ioThreadData->stride=stride;
        p_ioThreadData->loopCount=0x100000; /* sized to keep I/O running ~10 secs */
        debug("%d: things look good, doing IO...\n",pid);
        rc =pthread_create(&ioThreadId,NULL, do_io_thread, (void *)p_ioThreadData);
        CHECK_RC(rc, "do_io_thread() pthread_create failed");
    }

    /* Inject/await the EEH event while I/O is in flight. */
#ifdef _AIX
    rc = do_eeh(p_ctx);
#else
    rc = do_poll_eeh(p_ctx);
#endif
    g_error=0; /* reset any error caught while the EEH event was in flight */

    if ( noIOP == NULL ) {
        pthread_join(ioThreadId, NULL);
    }
#ifndef _AIX /* for linux */
    if ( noIOP == NULL )
        pthread_cancel(thread);
#endif

    /* We get here after the EEH event has completed. */
    if (do_recover) {
        debug("%d: woow EEH is done recovering...\n",pid);
        rc = ioctl_dk_capi_recover_ctx(p_ctx);
        CHECK_RC(rc, "ctx reattached failed");
        /* Tell the waiting peer (mtype 2) that recovery succeeded. */
        msg_buf.mtype =2;
        strcpy(msg_buf.mtext, "K");
        if (msgsnd(msgid, &msg_buf, 2, IPC_NOWAIT) < 0) {
            fprintf(stderr, "%d: msgsnd failed\n", pid);
            return -1;
        }
#ifdef _AIX
        if (p_ctx->return_flags != DK_RF_REATTACHED)
            CHECK_RC(1, "recover ctx, expected DK_RF_REATTACHED");
        p_ctx->flags = DK_VF_HC_TUR;
        p_ctx->hint = DK_HINT_SENSE;
#endif
        fflush(stdout);
        ctx_reinit(p_ctx);
#ifdef _AIX
        p_ctx->hint=DK_HINT_SENSE;
#else
        p_ctx->hint=DK_CXLFLASH_VERIFY_HINT_SENSE;
        /* When dummy_sense_flag is set, dummy sense data is copied into
         * the verify ioctl input. */
        p_ctx->dummy_sense_flag=1;
#endif
        rc = ioctl_dk_capi_verify(p_ctx);
        CHECK_RC(rc, "ioctl_dk_capi_verify failed");
#ifndef _AIX /* for linux: restart the interrupt handler thread */
        pthread_create(&thread, NULL, ctx_rrq_rx, p_ctx);
#endif
    } else {
        /* No-recover path.  Historical handshake code kept for reference:
        msgid = msgget(key, IPC_CREAT | 0666);
        if(msgid < 0 ){
            fprintf(stderr, "%d: msgget() failed before msgrcv()\n", pid);
            return -1;
        }
        debug("%d: Going to wait at msgrcv()..\n", pid);
        fflush(stdout);
        if(msgrcv(msgid, &msg_buf, 2, 1, 0) < 0) {
            fprintf(stderr, "%d: msgrcv failed with errno %d\n", pid, errno);
            return -1;
        }
        debug("%d: Got out of msgrcv()..EEH is done, Try to recover....\n",pid);
        */
        /* Per discussion (9/28/2015): a new attach will fail until the
         * holding context has exited; assumed to apply to Linux as well. */
        return 100;
        /*rc = ioctl_dk_capi_recover_ctx(p_ctx);
        if(rc) return 100; //this to make sure recover failed
        else {
            fprintf(stderr,"%d:com'on recover should fail here...\n",pid);
            return 1; // we don't want to try IO anyway
        }*/
    }

end:
    if ( noIOP == NULL ) {
        stride=0x1;
        rc = do_io(p_ctx, stride);
        CHECK_RC(rc, "IO failed after EEH/recover");
    }
    if ( noIOP == NULL )
        pthread_cancel(thread);
    sleep(1);
    fflush(stdout);
    sleep(5); /* additional time to be safe */
    rc=close_res(p_ctx);
    sleep(5); /* don't let the child exit yet, to keep max ctx alive */
    rc |= ctx_close(p_ctx);
    CHECK_RC(rc,"ctx close or close_res failed\n");
    return rc;
}