/*
 * Per-task init: inject libplasticfs.so via LD_PRELOAD and prepend an
 * iotrace "log" stage to the PLASTICFS pipeline for this task.
 * Returns 0 always (SPANK convention: errors are logged, not fatal here).
 * Relies on file-scope `enabled` and `flags` set by option parsing —
 * presumably in this plugin's option callbacks; not visible in this chunk.
 */
int slurm_spank_task_init (spank_t sp, int ac, char **av)
{
	char nbuf [4096], obuf [4096];
	char label [64];
	const char *preload = "libplasticfs.so";
	/* flags may be NULL when the user gave no option args; always
	 * substitute "" before handing it to a %s conversion. */
	const char *lflags = flags ? flags : "";

	if (!enabled)
		return (0);

	/* append to LD_PRELOAD (with a space) */
	if (spank_getenv (sp, "LD_PRELOAD", obuf, sizeof (obuf)) == ESPANK_SUCCESS)
		snprintf (nbuf, sizeof (nbuf), "%s %s", obuf, preload);
	else
		/* BUG FIX: was strncpy(nbuf, preload, strlen(preload)),
		 * which copies exactly strlen bytes and leaves nbuf without
		 * a NUL terminator; snprintf always terminates. */
		snprintf (nbuf, sizeof (nbuf), "%s", preload);

	if (spank_setenv (sp, "LD_PRELOAD", nbuf, 1) != ESPANK_SUCCESS)
		slurm_error ("Failed to set LD_PRELOAD=%s\n", nbuf);

	/* prepend to PLASTICFS (with a pipe) */
	_iotrace_label (sp, label, sizeof (label));

	if (spank_getenv (sp, "PLASTICFS", obuf, sizeof (obuf)) == ESPANK_SUCCESS)
		snprintf (nbuf, sizeof (nbuf), "log - %s %s | %s",
			  label, lflags, obuf);
	else
		/* BUG FIX: was passing raw `flags` (possibly NULL) to %s;
		 * use the NULL-safe lflags like the branch above. */
		snprintf (nbuf, sizeof (nbuf), "log - %s %s", label, lflags);

	if (spank_setenv (sp, "PLASTICFS", nbuf, 1) != ESPANK_SUCCESS)
		slurm_error ("Failed to set PLASTICFS=%s\n", nbuf);

	return (0);
}
/*
 * Per-job user init: on node 0 only, compute the yogrt/aixslurm helper
 * socket path, export it as YOGRT_AIXSLURM_SOCKET, and start the helper
 * daemon (pid stored in file-scope helper_pid).
 * Returns 0 always.
 */
int slurm_spank_user_init (spank_t sp, int ac, char **av)
{
	char socket_name[4096];
	uid_t uid;
	uint32_t jobid, stepid, nodeid;

	/* NOTE(review): spank_get_item return codes are not checked here;
	 * on failure these stay uninitialized — presumably these items are
	 * always available in this context. TODO confirm. */
	spank_get_item(sp, S_JOB_UID, &uid);
	spank_get_item(sp, S_JOB_ID, &jobid);
	spank_get_item(sp, S_JOB_STEPID, &stepid);
	spank_get_item(sp, S_JOB_NODEID, &nodeid);

	/* Only the first node of the job runs the helper. */
	if (nodeid != 0)
		return 0;

	/* WARNING - If you change the file name here at all, you must
	   update the file name in src/aixslurm/internal.c as well.
	   Even though we have spank set the YOGRT_AIXSLURM_SOCKET
	   variable, the pmdv4 daemon will NOT pass the variable down to
	   the user's task. */
	/* BUG FIX: uid_t is typically unsigned, so passing it raw to %d is
	 * a format/argument mismatch (UB); cast to int to match %d and keep
	 * the filename format identical to internal.c. */
	snprintf(socket_name, sizeof(socket_name),
		 "/tmp/.yogrtaixslurm_%d_%u.%u", (int) uid, jobid, stepid);
	if (spank_setenv(sp, "YOGRT_AIXSLURM_SOCKET", socket_name, 1)
	    != ESPANK_SUCCESS)
		slurm_error ("Failed to set YOGRT_AIXSLURM_SOCKET=%s\n",
			     socket_name);

	helper_pid = start_helper(socket_name, jobid);

	return 0;
}
/*
 * Interactive-step remote X11 setup: run the slurm-spank-x11 helper
 * (X11_LIBEXEC_PROG -i jobid.stepid -g), read the DISPLAY value it
 * prints, and export it into the job environment.
 *
 * Returns 0 on success, or a negative code:
 *   -2 alloc/format failure, -3 popen failure,
 *   -4 no DISPLAY read, -5 spank_setenv failure.
 */
int _x11_init_remote_inter(spank_t sp, uint32_t jobid, uint32_t stepid)
{
	FILE *f;
	int status = -1;
	char *cmd_pattern = X11_LIBEXEC_PROG " -i %u.%u -g";
	char *cmd;
	size_t cmd_length;
	char display[256];

	/* build slurm-spank-x11 command to retrieve connected DISPLAY to use */
	cmd_length = strlen(cmd_pattern) + 128;
	cmd = malloc(cmd_length);
	/* NB: a negative snprintf error converts to a huge size_t and also
	 * lands in the failure branch, which is the desired behavior. */
	if (cmd == NULL ||
	    (size_t) snprintf(cmd, cmd_length, cmd_pattern,
			      jobid, stepid) >= cmd_length) {
		ERROR("x11: error while building cmd");
		status = -2;
	} else {
		/* execute the command to retrieve the DISPLAY value to use */
		f = popen(cmd, "r");
		if (f != NULL) {
			if (fscanf(f, "%255s\n", display) == 1) {
				if (spank_setenv(sp, "DISPLAY", display, 1)
				    != ESPANK_SUCCESS) {
					ERROR("x11: unable "
					      "to set DISPLAY in env");
					status = -5;
				} else {
					INFO("x11: now using DISPLAY=%s",
					     display);
					status = 0;
				}
			} else {
				ERROR("x11: unable to read DISPLAY value");
				status = -4;
			}
			pclose(f);
		} else {
			ERROR("x11: unable to exec get cmd '%s'", cmd);
			status = -3;
		}
	}
	/* BUG FIX: free was inside the else branch, leaking cmd whenever
	 * snprintf truncated; free(NULL) is a no-op, so free unconditionally. */
	free(cmd);

	return status;
}
/*
 * Batch-step remote X11 setup: the batch script inherits the submitting
 * shell's DISPLAY; look up the job's alloc node and owner, then launch
 * the slurm-spank-x11 helper over ssh (in the background, "&") to build
 * an X11 tunnel back to it, read the resulting DISPLAY from the helper's
 * stdout, and export it into the job environment.
 *
 * Returns 0 on success, or a negative code:
 *   -1 no inherited DISPLAY, -2 alloc/format failure, -3 slurm_load_job
 *   or popen failure, -4 bad job record count, -5 spank_setenv failure,
 *   -6 no DISPLAY read, -10 user lookup failure, -20 gethostname failure.
 */
int _x11_init_remote_batch(spank_t sp, uint32_t jobid, uint32_t stepid)
{
	int status;
	FILE *f;
	char localhost[256];
	char *cmd_pattern = X11_LIBEXEC_PROG
		" -u %s -s \"%s\" -o \"%s\" -f %s -d %s -t %s -i %u.%u -cwg %s &";
	char *cmd;
	size_t cmd_length;
	char display[256];
	struct passwd user_pwent;
	struct passwd *p_pwent;
	/* BUG FIX: sysconf() may return -1 ("no limit"/unsupported); the old
	 * code converted that to (size_t)-1 and declared a VLA of that size
	 * (UB / guaranteed stack overflow). Fall back to a sane size. */
	long pwent_sc = sysconf(_SC_GETPW_R_SIZE_MAX);
	size_t pwent_buffer_length = (pwent_sc > 0) ? (size_t) pwent_sc : 16384;
	char pwent_buffer[pwent_buffer_length];
	job_info_msg_t *job_buffer_ptr;
	job_info_t *job_ptr;

	/*
	 * get current hostname
	 */
	if (gethostname(localhost, sizeof(localhost)) != 0) {
		status = -20;
		goto exit;
	}
	/* POSIX leaves termination unspecified on truncation; force it. */
	localhost[sizeof(localhost) - 1] = '\0';

	/*
	 * the batch script inherits the DISPLAY value of the
	 * submission command. We will use it on the allocation node
	 * for proper establishment of a working X11 ssh tunnel
	 */
	if (spank_getenv(sp, "DISPLAY", display, 256) != ESPANK_SUCCESS) {
		ERROR("x11: unable to read batch step "
		      "inherited DISPLAY value");
		status = -1;
		goto exit;
	}

	/* get job infos */
	status = slurm_load_job(&job_buffer_ptr, jobid, SHOW_ALL);
	if (status != 0) {
		ERROR("x11: unable to get job infos");
		status = -3;
		goto exit;
	}

	/* check infos validity */
	if (job_buffer_ptr->record_count != 1) {
		ERROR("x11: job infos are invalid");
		status = -4;
		goto clean_exit;
	}
	job_ptr = job_buffer_ptr->job_array;

	/* get user name
	 * BUG FIX: getpwuid_r may return 0 with p_pwent == NULL when no
	 * entry matches; the old code then read uninitialized user_pwent.
	 * Treat that case as a lookup failure too. */
	status = getpwuid_r(job_ptr->user_id, &user_pwent, pwent_buffer,
			    pwent_buffer_length, &p_pwent);
	if (status != 0 || p_pwent == NULL) {
		error("x11: unable to get username for uid=%u : %s",
		      job_ptr->user_id, strerror(status));
		status = -10;
		goto clean_exit;
	}

	/*
	 * build the command line that will be used to forward the
	 * alloc node X11 tunnel
	 */
	cmd_length = strlen(cmd_pattern) + 128;
	cmd = malloc(cmd_length);
	if (cmd == NULL ||
	    (size_t) snprintf(cmd, cmd_length, cmd_pattern, user_pwent.pw_name,
			      (ssh_cmd == NULL) ? DEFAULT_SSH_CMD : ssh_cmd,
			      (ssh_args == NULL) ? DEFAULT_SSH_ARGS : ssh_args,
			      job_ptr->alloc_node, display, localhost,
			      jobid, stepid,
			      (helpertask_args == NULL) ?
			      DEFAULT_HELPERTASK_ARGS : helpertask_args)
	    >= cmd_length) {
		ERROR("x11: error while building cmd");
		status = -2;
	} else {
		INFO("x11: batch mode : executing %s", cmd);
		/* execute the command to retrieve the DISPLAY value to use */
		f = popen(cmd, "r");
		if (f != NULL) {
			if (fscanf(f, "%255s", display) == 1) {
				if (spank_setenv(sp, "DISPLAY", display, 1)
				    != ESPANK_SUCCESS) {
					ERROR("x11: unable to set DISPLAY"
					      " in job env");
					status = -5;
				} else {
					INFO("x11: now using DISPLAY=%s",
					     display);
					status = 0;
				}
			} else {
				ERROR("x11: unable to get a DISPLAY value");
				status = -6;
			}
			pclose(f);
		} else {
			ERROR("x11: unable to exec get cmd '%s'", cmd);
			status = -3;
		}
	}
	/* free(NULL) is a no-op; no guard needed */
	free(cmd);

clean_exit:
	slurm_free_job_info_msg(job_buffer_ptr);
exit:
	return status;
}