int main() { struct icmp_packet packet; char *src_ip; char *dest_ip; int sock_fd; src_ip = "127.0.0.2"; dest_ip = "127.0.0.1"; strcpy(packet.src_addr, src_ip); strcpy(packet.dest_addr, dest_ip); set_reply_type(&packet); packet.payload = "ZZZZZZ"; packet.payload_size = strlen(packet.payload); sock_fd = open_icmp_socket(); send_icmp_packet(sock_fd, &packet); close_icmp_socket(sock_fd); }
int req_stat_svr( struct batch_request *preq) /* ptr to the decoded request */ { svrattrl *pal; struct batch_reply *preply; struct brp_status *pstat; int bad = 0; char nc_buf[128]; int numjobs; int netrates[3]; memset(netrates, 0, sizeof(netrates)); /* update count and state counts from sv_numjobs and sv_jobstates */ lock_sv_qs_mutex(server.sv_qs_mutex, __func__); numjobs = server.sv_qs.sv_numjobs; unlock_sv_qs_mutex(server.sv_qs_mutex, __func__); pthread_mutex_lock(server.sv_attr_mutex); server.sv_attr[SRV_ATR_TotalJobs].at_val.at_long = numjobs; server.sv_attr[SRV_ATR_TotalJobs].at_flags |= ATR_VFLAG_SET; pthread_mutex_lock(server.sv_jobstates_mutex); update_state_ct( &server.sv_attr[SRV_ATR_JobsByState], server.sv_jobstates, server.sv_jobstbuf); pthread_mutex_unlock(server.sv_jobstates_mutex); netcounter_get(netrates); snprintf(nc_buf, 127, "%d %d %d", netrates[0], netrates[1], netrates[2]); if (server.sv_attr[SRV_ATR_NetCounter].at_val.at_str != NULL) free(server.sv_attr[SRV_ATR_NetCounter].at_val.at_str); server.sv_attr[SRV_ATR_NetCounter].at_val.at_str = strdup(nc_buf); if (server.sv_attr[SRV_ATR_NetCounter].at_val.at_str != NULL) server.sv_attr[SRV_ATR_NetCounter].at_flags |= ATR_VFLAG_SET; pthread_mutex_unlock(server.sv_attr_mutex); /* allocate a reply structure and a status sub-structure */ preply = &preq->rq_reply; set_reply_type(preply, BATCH_REPLY_CHOICE_Status); CLEAR_HEAD(preply->brp_un.brp_status); pstat = (struct brp_status *)calloc(1, sizeof(struct brp_status)); if (pstat == NULL) { reply_free(preply); req_reject(PBSE_SYSTEM, 0, preq, NULL, NULL); pthread_mutex_unlock(server.sv_attr_mutex); return(PBSE_SYSTEM); } CLEAR_LINK(pstat->brp_stlink); strcpy(pstat->brp_objname, server_name); pstat->brp_objtype = MGR_OBJ_SERVER; CLEAR_HEAD(pstat->brp_attr); append_link(&preply->brp_un.brp_status, &pstat->brp_stlink, pstat); /* add attributes to the status reply */ pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr); if (status_attrib( pal, 
svr_attr_def, server.sv_attr, SRV_ATR_LAST, preq->rq_perm, &pstat->brp_attr, false, &bad, 1)) /* IsOwner == TRUE */ { reply_badattr(PBSE_NOATTR, bad, pal, preq); } else { reply_send_svr(preq); } return(PBSE_NONE); } /* END req_stat_svr() */
/*
 * req_stat_job - service the "status job" batch request.
 *
 * Decides what kind of status is being requested — one job, an array,
 * a queue, or the whole server (optionally truncated or with arrays
 * summarized, per the request's extension string) — fills in a
 * stat_cntl control block accordingly, and hands off to
 * req_stat_job_step2() which produces and sends the actual reply.
 *
 * @param preq - ptr to the decoded request
 * @return PBSE_NONE on success, or a PBSE_* error after rejecting preq
 */
int req_stat_job(

  struct batch_request *preq) /* ptr to the decoded request */

  {
  struct stat_cntl      cntl; /* see svrfunc.h */
  char                 *name;
  job                  *pjob = NULL;
  pbs_queue            *pque = NULL;
  int                   rc = PBSE_NONE;
  char                  log_buf[LOCAL_LOG_BUF_SIZE];
  bool                  condensed = false;
  enum TJobStatTypeEnum type = tjstNONE;

  /*
   * first, validate the name of the requested object, either
   * a job, a queue, or the whole server.
   */

  if (LOGLEVEL >= 7)
    {
    sprintf(log_buf, "note");
    log_event(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, log_buf);
    }

  /* FORMAT: name = { <JOBID> | <QUEUEID> | '' } */
  name = preq->rq_ind.rq_status.rq_id;

  if (preq->rq_extend != NULL)
    {
    /* evaluate pbs_job_stat() 'extension' field */

    if (!strncasecmp(preq->rq_extend, "truncated", strlen("truncated")))
      {
      /* truncate response by 'max_report' */
      type = tjstTruncatedServer;
      }
    else if (!strncasecmp(preq->rq_extend, "summarize_arrays", strlen("summarize_arrays")))
      {
      type = tjstSummarizeArraysServer;
      }

    /* a trailing 'C' on the extension requests a condensed reply */
    if (preq->rq_extend[strlen(preq->rq_extend) - 1] == 'C')
      {
      condensed = true;
      }
    }  /* END if (preq->rq_extend != NULL) */

  if (isdigit((int)*name))
    {
    /* status a single job (job ids start with a digit) */

    if (is_array(name))
      {
      /* an array id; summarize_arrays (set above) takes precedence */
      if (type != tjstSummarizeArraysServer)
        {
        type = tjstArray;
        }
      }
    else
      {
      type = tjstJob;

      /* only verifying existence here; the job mutex is released at once */
      if ((pjob = svr_find_job(name, FALSE)) == NULL)
        {
        rc = PBSE_UNKJOBID;
        }
      else
        unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);
      }
    }
  else if (isalpha(name[0]))
    {
    /* a queue name; refine the type chosen from the extension string */
    if (type == tjstNONE)
      type = tjstQueue;
    else if (type == tjstSummarizeArraysServer)
      type = tjstSummarizeArraysQueue;
    else
      type = tjstTruncatedQueue;

    /* if found, this mutex is released later */
    if ((pque = find_queuebyname(name)) == NULL)
      {
      rc = PBSE_UNKQUE;
      }
    }
  else if ((*name == '\0') || (*name == '@'))
    {
    /* status all jobs at server */
    if (type == tjstNONE)
      type = tjstServer;
    }
  else
    {
    rc = PBSE_IVALREQ;
    }

  if (rc != 0)
    {
    /* is invalid - an error */
    req_reject(rc, 0, preq, NULL, NULL);

    return(rc);
    }

  set_reply_type(&preq->rq_reply, BATCH_REPLY_CHOICE_Status);

  CLEAR_HEAD(preq->rq_reply.brp_un.brp_status);

  if ((type == tjstTruncatedQueue) ||
      (type == tjstTruncatedServer))
    {
    /* truncated statuses re-iterate the queues themselves, so release
     * the queue found above before handing off */
    if (pque != NULL)
      {
      unlock_queue(pque, __func__, "", LOGLEVEL);
      pque = NULL;
      }
    }

  memset(&cntl, 0, sizeof(cntl));

  cntl.sc_type      = (int)type;
  cntl.sc_conn      = -1;
  cntl.sc_pque      = pque;
  cntl.sc_origrq    = preq;
  cntl.sc_post      = req_stat_job_step2;
  cntl.sc_jobid[0]  = '\0'; /* cause "start from beginning" */
  cntl.sc_condensed = condensed;

  req_stat_job_step2(&cntl); /* go to step 2, see if running is current */

  if (pque != NULL)
    unlock_queue(pque, "req_stat_job", (char *)"success", LOGLEVEL);

  return(PBSE_NONE);
  }  /* END req_stat_job() */
int req_stat_node( struct batch_request *preq) { char *name; int rc = PBSE_NONE; int type = 0; int bad = 0; struct pbsnode *pnode = NULL; struct batch_reply *preply; prop props; svrattrl *pal; /* * first, check that the server indeed has a list of nodes * and if it does, validate the name of the requested object-- * either name is that of a specific node, or name[0] is null/@ * meaning request is for all nodes in the server's jurisdiction */ if (LOGLEVEL >= 6) { log_record( PBSEVENT_SCHED, PBS_EVENTCLASS_REQUEST, __func__, "entered"); } if (svr_totnodes <= 0) { rc = PBSE_NONODES; req_reject(rc, 0, preq, NULL, (svr_unresolvednodes == 0)? "node list is empty - check 'server_priv/nodes' file": "none of the nodes in the 'server_priv/nodes' file resolves to a valid address"); return rc; } name = preq->rq_ind.rq_status.rq_id; if ((*name == '\0') || (*name == '@')) { type = 1; } else if ((*name == ':') && (*(name + 1) != '\0')) { if (!strcmp(name + 1, "ALL")) { type = 1; /* psuedo-group for all nodes */ } else { type = 2; props.name = name + 1; props.mark = 1; } } preply = &preq->rq_reply; set_reply_type(preply, BATCH_REPLY_CHOICE_Status); CLEAR_HEAD(preply->brp_un.brp_status); if (type == 0) { /* get status of the named node */ pnode = find_nodebyname(name); if (pnode == NULL) { rc = PBSE_UNKNODE; req_reject(rc, 0, preq, NULL, "cannot locate specified node"); return(rc); } /* get the status on all of the numa nodes */ if (pnode->nd_is_alps_reporter == TRUE) rc = get_alps_statuses(pnode, preq, &bad, &preply->brp_un.brp_status); else rc = get_numa_statuses(pnode, preq, &bad, &preply->brp_un.brp_status); pnode->unlock_node(__func__, "type == 0", LOGLEVEL); } else { /* get status of all or several nodes */ all_nodes_iterator *iter = NULL; std::vector<prop> plist; plist.push_back(props); while ((pnode = next_host(&allnodes,&iter,NULL)) != NULL) { if ((type == 2) && (!pnode->hasprop(&plist))) { pnode->unlock_node(__func__, "type != 0, next_host", LOGLEVEL); continue; } /* get 
the status on all of the numa nodes */ if (pnode->nd_is_alps_reporter == TRUE) rc = get_alps_statuses(pnode, preq, &bad, &preply->brp_un.brp_status); else rc = get_numa_statuses(pnode, preq, &bad, &preply->brp_un.brp_status); if (rc != PBSE_NONE) { pnode->unlock_node(__func__, "type != 0, rc != 0, get_numa_statuses", LOGLEVEL); break; } pnode->unlock_node(__func__, "type != 0, rc == 0, get_numa_statuses", LOGLEVEL); } if (iter != NULL) delete iter; } if (rc == PBSE_NONE) { /* SUCCESS */ reply_send_svr(preq); } else { if (rc != PBSE_UNKNODEATR) { req_reject(rc, 0, preq, NULL, NULL); } else { pal = (svrattrl *)GET_NEXT(preq->rq_ind.rq_status.rq_attr); reply_badattr(rc, bad, pal, preq); } } return(rc); } /* END req_stat_node() */
/*
 * req_stat_que - service the "status queue" batch request.
 *
 * An empty name (or one beginning with '@') statuses every queue on
 * the server; otherwise the single named queue is statused. Queue
 * mutexes are managed through mutex_mgr so they are released when
 * the enclosing scope is left.
 *
 * @param preq - ptr to the decoded request
 * @return 0 on success, or the PBSE_* error passed to req_reject()
 */
int req_stat_que(

  batch_request *preq)

  {
  char               *queue_id;
  pbs_queue          *pque = NULL;
  struct batch_reply *reply;
  int                 rc = 0;
  bool                status_all = false;
  char                log_buf[LOCAL_LOG_BUF_SIZE + 1];

  /*
   * first, validate the name of the requested object, either
   * a queue, or null for all queues
   */
  queue_id = preq->rq_ind.rq_status.rq_id;

  if ((queue_id[0] == '\0') || (queue_id[0] == '@'))
    {
    status_all = true;
    }
  else if ((pque = find_queuebyname(queue_id)) == NULL)
    {
    rc = PBSE_UNKQUE;
    snprintf(log_buf, LOCAL_LOG_BUF_SIZE, "cannot locate queue %s", queue_id);
    req_reject(rc, 0, preq, NULL, log_buf);

    return rc;
    }

  reply = &preq->rq_reply;
  set_reply_type(reply, BATCH_REPLY_CHOICE_Status);
  CLEAR_HEAD(reply->brp_un.brp_status);

  if (status_all == false)
    {
    /* status only the named queue; the lock taken by find_queuebyname()
     * is handed to queue_lock and released by its destructor */
    mutex_mgr queue_lock = mutex_mgr(pque->qu_mutex, true);

    rc = status_que(pque, preq, &reply->brp_un.brp_status);
    }
  else
    {
    /* pque == NULL before next_queue */
    svr_queues.lock();
    all_queues_iterator *queue_iter = svr_queues.get_iterator();
    svr_queues.unlock();

    /* walk every queue, appending its status to the reply */
    while ((pque = next_queue(&svr_queues, queue_iter)) != NULL)
      {
      mutex_mgr queue_lock = mutex_mgr(pque->qu_mutex, true);

      rc = status_que(pque, preq, &reply->brp_un.brp_status);

      if (rc == 0)
        continue;

      if (rc != PBSE_PERM)
        break;

      /* a permission failure on one queue is not fatal for the request */
      rc = 0;
      }

    delete queue_iter;
    }

  if (rc != PBSE_NONE)
    {
    reply_free(reply);
    req_reject(PBSE_NOATTR, rc, preq, NULL, "status_queue failed");
    }
  else
    {
    reply_send_svr(preq);
    }

  return rc;
  } /* END req_stat_que() */
/** * Function to run the tunnel */ void run_tunnel(char *dest, int server) { struct icmp_packet packet; int tun_fd, sock_fd; fd_set fs; tun_fd = tun_alloc("tun0", IFF_TUN | IFF_NO_PI); printf("[DEBUG] Starting tunnel - Dest: %s, Server: %d\n", dest, server); printf("[DEBUG] Opening ICMP socket\n"); sock_fd = open_icmp_socket(); if (server) { printf("[DEBUG] Binding ICMP socket\n"); bind_icmp_socket(sock_fd); } configure_network(server); while (1) { FD_ZERO(&fs); FD_SET(tun_fd, &fs); FD_SET(sock_fd, &fs); select(tun_fd>sock_fd?tun_fd+1:sock_fd+1, &fs, NULL, NULL, NULL); if (FD_ISSET(tun_fd, &fs)) { printf("[DEBUG] Data needs to be readed from tun device\n"); // Reading data from tun device and sending ICMP packet printf("[DEBUG] Preparing ICMP packet to be sent\n"); // Preparing ICMP packet to be sent memset(&packet, 0, sizeof(struct icmp_packet)); printf("[DEBUG] Destination address: %s\n", dest); strcpy(packet.src_addr, "0.0.0.0"); strcpy(packet.dest_addr, dest); if(server) { set_reply_type(&packet); } else { set_echo_type(&packet); } packet.payload = malloc(MTU); packet.payload_size = tun_read(tun_fd, packet.payload, MTU); if(packet.payload_size == -1) { perror("Error while reading from tun device\n"); exit(EXIT_FAILURE); } printf("[DEBUG] Sending ICMP packet with payload_size: %d, payload: %s\n", packet.payload_size, packet.payload); // Sending ICMP packet send_icmp_packet(sock_fd, &packet); free(packet.payload); } if (FD_ISSET(sock_fd, &fs)) { printf("[DEBUG] Received ICMP packet\n"); // Reading data from remote socket and sending to tun device // Getting ICMP packet memset(&packet, 0, sizeof(struct icmp_packet)); receive_icmp_packet(sock_fd, &packet); printf("[DEBUG] Read ICMP packet with src: %s, dest: %s, payload_size: %d, payload: %s\n", packet.src_addr, packet.dest_addr, packet.payload_size, packet.payload); // Writing out to tun device tun_write(tun_fd, packet.payload, packet.payload_size); printf("[DEBUG] Src address being copied: %s\n", 
packet.src_addr); strcpy(dest, packet.src_addr); } } }
/*
 * send_request_to_remote_server - encode and ship a batch request that is
 * bound for another server, then read back the remote reply.
 *
 * The request type selects the wire encoding: most types go through a
 * PBSD_*_put helper; the rest are encoded directly on the DIS tcp
 * channel (header + body + extension) and flushed. The reply is read
 * into request->rq_reply. Thread cancellation is disabled for the
 * duration so the channel is always cleaned up.
 *
 * @param conn         connection-table index for the remote server
 * @param request      the request to forward; its rq_reply is filled in
 * @param close_handle when true, svr_disconnect(conn) before returning
 * @return PBSE_NONE on success, an encode error code, or -1 for an
 *         unsupported request type
 */
int send_request_to_remote_server(

  int            conn,
  batch_request *request,
  bool           close_handle)

  {
  struct attropl  *patrl;
  struct svrattrl *psvratl;
  int              rc = PBSE_NONE;
  int              tmp_rc = PBSE_NONE;
  int              sock = 0;
  char             log_buf[LOCAL_LOG_BUF_SIZE];
  struct tcp_chan *chan = NULL;

  /* look up the socket for this connection under its mutex */
  pthread_mutex_lock(connection[conn].ch_mutex);
  sock = connection[conn].ch_socket;
  pthread_mutex_unlock(connection[conn].ch_mutex);

  request->rq_conn = sock;

  /* keep cancellation off until the channel is cleaned up below */
  pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0);

  if ((chan = DIS_tcp_setup(sock)) == NULL)
    {
    log_err(PBSE_MEM_MALLOC, __func__, "Could not allocate memory for socket buffer");
    close_conn(sock, FALSE);
    return(PBSE_MEM_MALLOC);
    }

  /* the request is bound to another server, encode/send the request */
  switch (request->rq_type)
    {
    case PBS_BATCH_DeleteJob:

      rc = PBSD_mgr_put(
             conn,
             PBS_BATCH_DeleteJob,
             MGR_CMD_DELETE,
             MGR_OBJ_JOB,
             request->rq_ind.rq_delete.rq_objname,
             NULL,
             NULL);

      break;

    case PBS_BATCH_HoldJob:

      attrl_fixlink(&request->rq_ind.rq_hold.rq_orig.rq_attr);

      /* NOTE(review): GET_NEXT is not checked for NULL here before
       * taking &psvratl->al_atopl — an empty attribute list would
       * dereference NULL; confirm callers always supply one entry */
      psvratl = (struct svrattrl *)GET_NEXT(request->rq_ind.rq_hold.rq_orig.rq_attr);
      patrl = &psvratl->al_atopl;

      rc = PBSD_mgr_put(
             conn,
             PBS_BATCH_HoldJob,
             MGR_CMD_SET,
             MGR_OBJ_JOB,
             request->rq_ind.rq_hold.rq_orig.rq_objname,
             patrl,
             NULL);

      break;

    case PBS_BATCH_CheckpointJob:

      rc = PBSD_mgr_put(
             conn,
             PBS_BATCH_CheckpointJob,
             MGR_CMD_SET,
             MGR_OBJ_JOB,
             request->rq_ind.rq_hold.rq_orig.rq_objname,
             NULL,
             NULL);

      break;

    case PBS_BATCH_GpuCtrl:

      rc = PBSD_gpu_put(
             conn,
             request->rq_ind.rq_gpuctrl.rq_momnode,
             request->rq_ind.rq_gpuctrl.rq_gpuid,
             request->rq_ind.rq_gpuctrl.rq_gpumode,
             request->rq_ind.rq_gpuctrl.rq_reset_perm,
             request->rq_ind.rq_gpuctrl.rq_reset_vol,
             NULL);

      break;

    case PBS_BATCH_MessJob:

      rc = PBSD_msg_put(
             conn,
             request->rq_ind.rq_message.rq_jid,
             request->rq_ind.rq_message.rq_file,
             request->rq_ind.rq_message.rq_text,
             NULL);

      break;

    case PBS_BATCH_ModifyJob:

    case PBS_BATCH_AsyModifyJob:

      attrl_fixlink(&request->rq_ind.rq_modify.rq_attr);

      /* svrattrl's first member is its attropl, so the cast below
       * reinterprets the list head as an attropl pointer */
      patrl = (struct attropl *) & ((struct svrattrl *)GET_NEXT(
                                      request->rq_ind.rq_modify.rq_attr))->al_atopl;

      rc = PBSD_mgr_put(
             conn,
             request->rq_type,
             MGR_CMD_SET,
             MGR_OBJ_JOB,
             request->rq_ind.rq_modify.rq_objname,
             patrl,
             NULL);

      break;

    case PBS_BATCH_Rerun:

      /* hand-encoded: header, job id, empty extension, flush */
      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_Rerun, msg_daemonname)))
        break;

      if ((rc = encode_DIS_JobId(chan, request->rq_ind.rq_rerun)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_RegistDep:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_RegistDep, msg_daemonname)))
        break;

      if ((rc = encode_DIS_Register(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_AsySignalJob:

    case PBS_BATCH_SignalJob:

      rc = PBSD_sig_put(
             conn,
             (char *)request->rq_ind.rq_signal.rq_jid,
             (char *)request->rq_ind.rq_signal.rq_signame,
             (char *)request->rq_extra);

      break;

    case PBS_BATCH_StatusJob:

      rc = PBSD_status_put(
             conn,
             PBS_BATCH_StatusJob,
             request->rq_ind.rq_status.rq_id,
             NULL,
             NULL);

      break;

    case PBS_BATCH_TrackJob:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_TrackJob, msg_daemonname)))
        break;

      if ((rc = encode_DIS_TrackJob(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_ReturnFiles:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_ReturnFiles, msg_daemonname)))
        break;

      if ((rc = encode_DIS_ReturnFiles(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_CopyFiles:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_CopyFiles, msg_daemonname)))
        break;

      if ((rc = encode_DIS_CopyFiles(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_DelFiles:

      /* DelFiles shares the CopyFiles body encoding */
      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_DelFiles, msg_daemonname)))
        break;

      if ((rc = encode_DIS_CopyFiles(chan, request)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, 0)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_DeleteReservation:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_DeleteReservation, msg_daemonname)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, request->rq_extend)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    case PBS_BATCH_ChangePowerState:

      if ((rc = encode_DIS_ReqHdr(chan, PBS_BATCH_ChangePowerState, msg_daemonname)))
        break;

      if ((rc = encode_DIS_PowerState(chan, request->rq_ind.rq_powerstate)))
        break;

      if ((rc = encode_DIS_ReqExtend(chan, request->rq_extend)))
        break;

      rc = DIS_tcp_wflush(chan);

      break;

    default:

      sprintf(log_buf, msg_issuebad, request->rq_type);
      log_err(-1, __func__, log_buf);
      rc = -1;

      break;
    } /* END switch (request->rq_type) */

  /* read the remote reply into the request (attempted even when the
   * encode above failed; a failure here marks the reply NULL-typed) */
  if ((tmp_rc = DIS_reply_read(chan, &request->rq_reply)) != PBSE_NONE)
    {
    sprintf(log_buf, "DIS_reply_read failed: %d", tmp_rc);
    log_record(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, log_buf);

    if (chan->IsTimeout)
      request->rq_reply.brp_code = PBSE_TIMEOUT;
    else
      request->rq_reply.brp_code = tmp_rc;

    set_reply_type(&request->rq_reply, BATCH_REPLY_CHOICE_NULL);
    }

  DIS_tcp_cleanup(chan);

  if (close_handle == true)
    svr_disconnect(conn);

  pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, 0);

  return(rc);
  } /* END send_request_to_remote_server() */
/*
 * req_locatejob - service the "locate job" batch request.
 *
 * Reports which server a job lives on: if the job is found locally the
 * answer is this server's name; otherwise the tracking records of jobs
 * that moved elsewhere are searched. On success a Locate reply is sent;
 * otherwise the request is rejected with PBSE_UNKJOBID.
 *
 * @param preq - ptr to the decoded request
 * @return PBSE_NONE on success, PBSE_UNKJOBID when the job is unknown
 */
int req_locatejob(

  batch_request *preq)

  {
  int   rc = PBSE_NONE;
  char *at_sign;
  job  *pjob;
  char *location = NULL;

  /* strip any trailing "@server_name" qualifier from the job id */
  if ((at_sign = strchr(preq->rq_ind.rq_locate, (int)'@')) != NULL)
    *at_sign = '\0';

  pjob = svr_find_job(preq->rq_ind.rq_locate, FALSE);

  if (pjob != NULL)
    {
    /* the job is here; we only needed to know it exists */
    unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);
    location = server_name;
    }
  else
    {
    /* not local: scan the tracking records under the track mutex,
     * released by track_mgr's destructor */
    mutex_mgr track_mgr(server.sv_track_mutex, false);

    for (int idx = 0; idx < server.sv_tracksize; idx++)
      {
      if ((server.sv_track[idx].tk_mtime) &&
          (strcmp(server.sv_track[idx].tk_jobid, preq->rq_ind.rq_locate) == 0))
        {
        location = server.sv_track[idx].tk_location;
        break;
        }
      }
    }

  if (location == NULL)
    {
    if (LOGLEVEL >= 7)
      {
      log_event(
        PBSEVENT_JOB,
        PBS_EVENTCLASS_JOB,
        preq->rq_ind.rq_locate,
        "cannot find job in server tracking list");
      }

    rc = PBSE_UNKJOBID;
    req_reject(rc, 0, preq, NULL, NULL);
    }
  else
    {
    set_reply_type(&preq->rq_reply, BATCH_REPLY_CHOICE_Locate);
    preq->rq_reply.brp_code = 0;
    preq->rq_reply.brp_auxcode = 0;

    snprintf(preq->rq_reply.brp_un.brp_locate,
             sizeof(preq->rq_reply.brp_un.brp_locate),
             "%s", location);

    reply_send_svr(preq);
    }

  return(rc);
  } /* END req_locatejob() */