/* Create an ID3DAdapter9 for a DRM device fd (mesa/nine, static-or-dynamic
 * pipe-loader build).  Steps: take over the fd (possibly redirected to the
 * user-preferred GPU), create the hardware "hal" screen, verify dma-buf
 * sharing, apply driconf "nine" options (throttling, vblank_mode,
 * thread_submit), wrap the hal screen in a software screen for resource
 * sharing, read the PCI descriptor and instantiate the adapter.
 * Returns D3D_OK on success; on failure releases everything acquired so far
 * and returns E_OUTOFMEMORY / D3DERR_DRIVERINTERNALERROR / the
 * NineAdapter9_new result. */
static HRESULT WINAPI drm_create_adapter( int fd, ID3DAdapter9 **ppAdapter )
{
    struct d3dadapter9drm_context *ctx = CALLOC_STRUCT(d3dadapter9drm_context);
    HRESULT hr;
    int i, different_device;
    const struct drm_conf_ret *throttle_ret = NULL;
    const struct drm_conf_ret *dmabuf_ret = NULL;
    driOptionCache defaultInitOptions;
    driOptionCache userInitOptions;
    /* -2 is the "not set by the user" sentinel for the throttle option. */
    int throttling_value_user = -2;
#if !GALLIUM_STATIC_TARGETS
    /* Driver search locations; the env vars may be NULL and are skipped. */
    const char *paths[] = {
        getenv("D3D9_DRIVERS_PATH"),
        getenv("D3D9_DRIVERS_DIR"),
        PIPE_SEARCH_DIR
    };
#endif

    if (!ctx) { return E_OUTOFMEMORY; }

    ctx->base.destroy = drm_destroy;

    /* May substitute the fd of a different (user-preferred) GPU; when that
     * happens a linear framebuffer is required so it can be shared. */
    fd = loader_get_user_preferred_fd(fd, &different_device);
    ctx->fd = fd;
    ctx->base.linear_framebuffer = !!different_device;

#if GALLIUM_STATIC_TARGETS
    ctx->base.hal = dd_create_screen(fd);
#else
    /* use pipe-loader to dlopen appropriate drm driver */
    if (!pipe_loader_drm_probe_fd(&ctx->dev, fd, FALSE)) {
        ERR("Failed to probe drm fd %d.\n", fd);
        FREE(ctx);
        close(fd);
        return D3DERR_DRIVERINTERNALERROR;
    }

    /* use pipe-loader to create a drm screen (hal) */
    ctx->base.hal = NULL;
    for (i = 0; !ctx->base.hal && i < Elements(paths); ++i) {
        if (!paths[i]) { continue; }
        ctx->base.hal = pipe_loader_create_screen(ctx->dev, paths[i]);
    }
#endif
    if (!ctx->base.hal) {
        ERR("Unable to load requested driver.\n");
        drm_destroy(&ctx->base);
        return D3DERR_DRIVERINTERNALERROR;
    }

#if GALLIUM_STATIC_TARGETS
    dmabuf_ret = dd_configuration(DRM_CONF_SHARE_FD);
    throttle_ret = dd_configuration(DRM_CONF_THROTTLE);
#else
    dmabuf_ret = pipe_loader_configuration(ctx->dev, DRM_CONF_SHARE_FD);
    throttle_ret = pipe_loader_configuration(ctx->dev, DRM_CONF_THROTTLE);
#endif // GALLIUM_STATIC_TARGETS
    /* dma-buf sharing is mandatory for nine; bail out if unsupported. */
    if (!dmabuf_ret || !dmabuf_ret->val.val_bool) {
        ERR("The driver is not capable of dma-buf sharing."
            "Abandon to load nine state tracker\n");
        drm_destroy(&ctx->base);
        return D3DERR_DRIVERINTERNALERROR;
    }

    /* Driver-provided throttle default; -1 means "no throttling". */
    if (throttle_ret && throttle_ret->val.val_int != -1) {
        ctx->base.throttling = TRUE;
        ctx->base.throttling_value = throttle_ret->val.val_int;
    } else
        ctx->base.throttling = FALSE;

    /* User driconf options override the driver defaults. */
    driParseOptionInfo(&defaultInitOptions, __driConfigOptionsNine);
    driParseConfigFiles(&userInitOptions, &defaultInitOptions, 0, "nine");
    if (driCheckOption(&userInitOptions, "throttle_value", DRI_INT)) {
        throttling_value_user = driQueryOptioni(&userInitOptions, "throttle_value");
        if (throttling_value_user == -1)
            ctx->base.throttling = FALSE;
        else if (throttling_value_user >= 0) {
            ctx->base.throttling = TRUE;
            ctx->base.throttling_value = throttling_value_user;
        }
    }

    if (driCheckOption(&userInitOptions, "vblank_mode", DRI_ENUM))
        ctx->base.vblank_mode = driQueryOptioni(&userInitOptions, "vblank_mode");
    else
        ctx->base.vblank_mode = 1;

    if (driCheckOption(&userInitOptions, "thread_submit", DRI_BOOL)) {
        ctx->base.thread_submit = driQueryOptionb(&userInitOptions, "thread_submit");
        if (ctx->base.thread_submit && (throttling_value_user == -2 ||
                                        throttling_value_user == 0)) {
            /* thread_submit pairs best with no extra throttling. */
            ctx->base.throttling_value = 0;
        } else if (ctx->base.thread_submit) {
            DBG("You have set a non standard throttling value in combination with thread_submit."
                "We advise to use a throttling value of -2/0");
        }
        if (ctx->base.thread_submit && !different_device)
            DBG("You have set thread_submit but do not use a different device than the server."
                "You should not expect any benefit.");
    }

    driDestroyOptionCache(&userInitOptions);
    driDestroyOptionInfo(&defaultInitOptions);

#if GALLIUM_STATIC_TARGETS
    ctx->base.ref = ninesw_create_screen(ctx->base.hal);
#else
    /* wrap it to create a software screen that can share resources */
    if (pipe_loader_sw_probe_wrapped(&ctx->swdev, ctx->base.hal)) {
        ctx->base.ref = NULL;
        for (i = 0; !ctx->base.ref && i < Elements(paths); ++i) {
            if (!paths[i]) { continue; }
            ctx->base.ref = pipe_loader_create_screen(ctx->swdev, paths[i]);
        }
    }
#endif
    if (!ctx->base.ref) {
        /* Not fatal: only software devices become unavailable. */
        ERR("Couldn't wrap drm screen to swrast screen. Software devices "
            "will be unavailable.\n");
    }

    /* read out PCI info */
    read_descriptor(&ctx->base, fd);

    /* create and return new ID3DAdapter9 */
    hr = NineAdapter9_new(&ctx->base, (struct NineAdapter9 **)ppAdapter);
    if (FAILED(hr)) {
        drm_destroy(&ctx->base);
        return hr;
    }

    return D3D_OK;
}
// Initialize and start PSL thread
//
// The return value is encoded in a 16-bit value divided into 4 bits for each
// possible adapter. Then the 4 bits in each adapter represent the 4 possible
// AFUs on an adapter. For example: afu0.0 is 0x8000 and afu3.0 is 0x0008.
// Returns 0 on any failure (after best-effort cleanup).
uint16_t psl_init(struct psl **head, struct parms *parms, char *id, char *host,
		  int port, pthread_mutex_t * lock, FILE * dbg_fp)
{
	struct psl *psl;
	struct job_event *reset;
	uint16_t location;

	location = 0x8000;
	if ((psl = (struct psl *)calloc(1, sizeof(struct psl))) == NULL) {
		perror("malloc");
		error_msg("Unable to allocation memory for psl");
		goto init_fail;
	}
	psl->timeout = parms->timeout;

	// The AFU name must have the exact form "afuM.N" with M,N in '0'..'3'.
	if ((strlen(id) != 6) || strncmp(id, "afu", 3) || (id[4] != '.')) {
		warn_msg("Invalid afu name: %s", id);
		goto init_fail;
	}
	if ((id[3] < '0') || (id[3] > '3')) {
		warn_msg("Invalid afu major: %c", id[3]);
		goto init_fail;
	}
	if ((id[5] < '0') || (id[5] > '3')) {
		warn_msg("Invalid afu minor: %c", id[5]);
		goto init_fail;
	}
	psl->dbg_fp = dbg_fp;
	psl->major = id[3] - '0';
	psl->minor = id[5] - '0';
	// Debug id packs major into the high nibble, minor into the low nibble.
	psl->dbg_id = psl->major << 4;
	psl->dbg_id |= psl->minor;
	// Shift the single set bit of 0x8000 into this AFU's slot:
	// 4 bit positions per adapter (major), 1 per AFU (minor).
	location >>= (4 * psl->major);
	location >>= psl->minor;
	if ((psl->name = (char *)malloc(strlen(id) + 1)) == NULL) {
		perror("malloc");
		error_msg("Unable to allocation memory for psl->name");
		goto init_fail;
	}
	strcpy(psl->name, id);
	if ((psl->host = (char *)malloc(strlen(host) + 1)) == NULL) {
		perror("malloc");
		error_msg("Unable to allocation memory for psl->host");
		goto init_fail;
	}
	strcpy(psl->host, host);
	psl->port = port;
	psl->client = NULL;
	psl->idle_cycles = PSL_IDLE_CYCLES;
	psl->lock = lock;

	// Connect to AFU
	psl->afu_event = (struct AFU_EVENT *)malloc(sizeof(struct AFU_EVENT));
	if (psl->afu_event == NULL) {
		perror("malloc");
		goto init_fail;
	}
	info_msg("Attempting to connect AFU: %s @ %s:%d", psl->name,
		 psl->host, psl->port);
	if (psl_init_afu_event(psl->afu_event, psl->host, psl->port) !=
	    PSL_SUCCESS) {
		warn_msg("Unable to connect AFU: %s @ %s:%d", psl->name,
			 psl->host, psl->port);
		goto init_fail;
	}

	// DEBUG
	debug_afu_connect(psl->dbg_fp, psl->dbg_id);

	// Initialize job handler
	if ((psl->job = job_init(psl->afu_event, &(psl->state), psl->name,
				 psl->dbg_fp, psl->dbg_id)) == NULL) {
		perror("job_init");
		goto init_fail;
	}
	// Initialize mmio handler
	if ((psl->mmio = mmio_init(psl->afu_event, psl->timeout, psl->name,
				   psl->dbg_fp, psl->dbg_id)) == NULL) {
		perror("mmio_init");
		goto init_fail;
	}
	// Initialize cmd handler
	if ((psl->cmd = cmd_init(psl->afu_event, parms, psl->mmio,
				 &(psl->state), psl->name, psl->dbg_fp,
				 psl->dbg_id)) == NULL) {
		perror("cmd_init");
		goto init_fail;
	}
	// Set credits for AFU
	if (psl_aux1_change(psl->afu_event, psl->cmd->credits) != PSL_SUCCESS) {
		warn_msg("Unable to set credits");
		goto init_fail;
	}
	// Start psl loop thread
	if (pthread_create(&(psl->thread), NULL, _psl_loop, psl)) {
		perror("pthread_create");
		goto init_fail;
	}

	// Add psl to list, kept sorted by (major, minor).
	while ((*head != NULL) && ((*head)->major < psl->major)) {
		head = &((*head)->_next);
	}
	while ((*head != NULL) && ((*head)->major == psl->major)
	       && ((*head)->minor < psl->minor)) {
		head = &((*head)->_next);
	}
	psl->_next = *head;
	if (psl->_next != NULL)
		psl->_next->_prev = psl;
	*head = psl;

	// Send reset to AFU and busy-wait (with lock_delay backoff) until the
	// loop thread started above has consumed the reset job event.
	reset = add_job(psl->job, PSL_JOB_RESET, 0L);
	while (psl->job->job == reset) {	/*infinite loop */
		lock_delay(psl->lock);
	}

	// Read AFU descriptor
	psl->state = PSLSE_DESC;
	read_descriptor(psl->mmio, psl->lock);

	// Finish PSL configuration
	psl->state = PSLSE_IDLE;
	if (dedicated_mode_support(psl->mmio)) {
		// AFU supports Dedicated Mode
		psl->max_clients = 1;
	}
	if (directed_mode_support(psl->mmio)) {
		// AFU supports Directed Mode
		// (if both modes are supported, Directed wins: checked last)
		psl->max_clients = psl->mmio->desc.num_of_processes;
	}
	if (psl->max_clients == 0) {
		error_msg("AFU programming model is invalid");
		goto init_fail;
	}
	psl->client = (struct client **)calloc(psl->max_clients,
					       sizeof(struct client *));
	psl->cmd->client = psl->client;
	psl->cmd->max_clients = psl->max_clients;

	return location;

 init_fail:
	// NOTE(review): teardown is partial — job/mmio/cmd handlers and the
	// running _psl_loop thread are not released/stopped here; confirm they
	// are reclaimed elsewhere. Also `lock` is unlocked without a visible
	// matching lock in this function — presumably a callee leaves it held
	// on failure; verify against the callers.
	if (psl) {
		if (psl->afu_event) {
			psl_close_afu_event(psl->afu_event);
			free(psl->afu_event);
		}
		if (psl->host)
			free(psl->host);
		if (psl->name)
			free(psl->name);
		free(psl);
	}
	pthread_mutex_unlock(lock);
	return 0;
}
/* Wait until at least one running child command finishes, drain its output
 * pipes, and reap it.  Returns 0 immediately when no commands are running,
 * otherwise 1 after at least one child has been cleaned up.  A child can be
 * detected two ways: via the SIGCHLD bookkeeping arrays
 * (terminated_children/child_events, filled by a signal handler) or via EOF
 * on its output descriptors followed by waitpid(). */
int exec_wait()
{
    int i, j;
    int ret;
    int fd_max;
    int pid;
    int status;
    int finished;
    fd_set fds;

    /* Handle naive make1() which does not know if commands are running. */
    if ( !cmdsrunning ) return 0;

    /* Process children that signaled. */
    finished = 0;
    while ( !finished && cmdsrunning )
    {
        /* Compute max read file descriptor for use in select(). */
        populate_file_descriptors( &fd_max, &fds );

        if ( 0 < globs.timeout )
        {
            /* Force select() to timeout so we can terminate expired
             * processes. */
            tv.tv_sec = select_timeout;
            tv.tv_nsec = 0;

            /* select() will wait until: i/o on a descriptor, a signal, or we
             * time out. */
            ret = pselect( fd_max + 1, &fds, 0, 0, &tv, &empty_sigmask );
        }
        else
        {
            /* pselect() will wait until i/o on a descriptor or a signal.
             * empty_sigmask is applied atomically during the wait so SIGCHLD
             * can interrupt it. */
            ret = pselect( fd_max + 1, &fds, 0, 0, 0, &empty_sigmask );
        }

        /* EINTR just means a signal (e.g. SIGCHLD) woke us; anything else
         * is fatal. */
        if (-1 == ret && errno != EINTR)
        {
            perror("pselect()");
            exit(-1);
        }

        if (0 < child_events)
        {
            /* child terminated via SIGCHLD */
            for (i=0; i<MAXJOBS; ++i)
            {
                if (0 < terminated_children[i].pid)
                {
                    /* NB: this local pid_t shadows the outer int pid. */
                    pid_t pid = terminated_children[i].pid;

                    /* get index of terminated pid */
                    for (j=0; j<globs.jobs; ++j)
                    {
                        if (pid == cmdtab[j].pid)
                        {
                            /* cleanup loose ends for terminated process */
                            close_streams(j, OUT);
                            if ( globs.pipe_action != 0 )
                                close_streams(j, ERR);

                            cleanup_child(j, terminated_children[i].status);
                            --cmdsrunning;
                            finished = 1;
                            break;
                        }
                    }

                    /* clear entry from list */
                    terminated_children[i].status = 0;
                    terminated_children[i].pid = 0;
                    --child_events;
                }
            }
        }

        if ( 0 < ret )
        {
            for ( i = 0; i < globs.jobs; ++i )
            {
                int out = 0;
                int err = 0;
                if ( FD_ISSET( cmdtab[ i ].fd[ OUT ], &fds ) )
                    out = read_descriptor( i, OUT );

                if ( ( globs.pipe_action != 0 ) &&
                     ( FD_ISSET( cmdtab[ i ].fd[ ERR ], &fds ) ) )
                    err = read_descriptor( i, ERR );

                /* If feof on either descriptor, then we are done. */
                if ( out || err )
                {
                    /* Close the stream and pipe descriptors. */
                    close_streams( i, OUT );
                    if ( globs.pipe_action != 0 )
                        close_streams( i, ERR );

                    /* Reap the child and release resources. */
                    pid = waitpid( cmdtab[ i ].pid, &status, 0 );
                    if ( pid == cmdtab[ i ].pid )
                    {
                        /* move into function so signal handler can also use */
                        finished = 1;
                        cleanup_child(i, status);
                        --cmdsrunning;
                    }
                    else
                    {
                        printf( "unknown pid %d with errno = %d\n", pid, errno );
                        exit( EXITBAD );
                    }
                }
            }
        }
    }
    return 1;
}
/* Wait until at least one running child command finishes, then drain its
 * output, report the action, record user/system/wall times, invoke the
 * completion callback and free the command slot.  Returns 0 immediately when
 * no commands are running, otherwise 1 after at least one child completed. */
int exec_wait()
{
    int i;
    int ret;
    int fd_max;
    int pid;
    int status;
    int finished;
    int rstat;
    timing_info time_info;
    fd_set fds;
    struct tms new_time;

    /* Handle naive make1() which does not know if commands are running. */
    if ( !cmdsrunning ) return 0;

    /* Process children that signaled. */
    finished = 0;
    while ( !finished && cmdsrunning )
    {
        /* Compute max read file descriptor for use in select(). */
        populate_file_descriptors( &fd_max, &fds );

        if ( 0 < globs.timeout )
        {
            /* Force select() to timeout so we can terminate expired
             * processes. */
            tv.tv_sec = select_timeout;
            tv.tv_usec = 0;

            /* select() will wait until: i/o on a descriptor, a signal, or we
             * time out. */
            ret = select( fd_max + 1, &fds, 0, 0, &tv );
        }
        else
        {
            /* select() will wait until i/o on a descriptor or a signal. */
            ret = select( fd_max + 1, &fds, 0, 0, 0 );
        }

        /* NOTE(review): ret == -1 (e.g. EINTR) is silently retried by the
         * enclosing while loop; no error is reported. */
        if ( 0 < ret )
        {
            for ( i = 0; i < globs.jobs; ++i )
            {
                int out = 0;
                int err = 0;
                if ( FD_ISSET( cmdtab[ i ].fd[ OUT ], &fds ) )
                    out = read_descriptor( i, OUT );

                if ( ( globs.pipe_action != 0 ) &&
                     ( FD_ISSET( cmdtab[ i ].fd[ ERR ], &fds ) ) )
                    err = read_descriptor( i, ERR );

                /* If feof on either descriptor, then we are done. */
                if ( out || err )
                {
                    /* Close the stream and pipe descriptors. */
                    close_streams( i, OUT );
                    if ( globs.pipe_action != 0 )
                        close_streams( i, ERR );

                    /* Reap the child and release resources. */
                    pid = waitpid( cmdtab[ i ].pid, &status, 0 );
                    if ( pid == cmdtab[ i ].pid )
                    {
                        finished = 1;
                        pid = 0;
                        cmdtab[ i ].pid = 0;

                        /* Set reason for exit if not timed out. */
                        if ( WIFEXITED( status ) )
                        {
                            cmdtab[ i ].exit_reason = 0 == WEXITSTATUS( status )
                                ? EXIT_OK
                                : EXIT_FAIL;
                        }

                        /* Print out the rule and target name. */
                        out_action( cmdtab[ i ].action, cmdtab[ i ].target,
                            cmdtab[ i ].command, cmdtab[ i ].buffer[ OUT ],
                            cmdtab[ i ].buffer[ ERR ],
                            cmdtab[ i ].exit_reason );

                        /* Child cpu times are accumulated deltas against the
                         * global old_time snapshot. */
                        times( &new_time );
                        time_info.system = (double)( new_time.tms_cstime -
                            old_time.tms_cstime ) / CLOCKS_PER_SEC;
                        time_info.user = (double)( new_time.tms_cutime -
                            old_time.tms_cutime ) / CLOCKS_PER_SEC;
                        time_info.start = cmdtab[ i ].start_dt;
                        time_info.end = time( 0 );
                        old_time = new_time;

                        /* Drive the completion. */
                        --cmdsrunning;

                        if ( intr ) rstat = EXEC_CMD_INTR;
                        else if ( status != 0 ) rstat = EXEC_CMD_FAIL;
                        else rstat = EXEC_CMD_OK;

                        /* Assume -p0 in effect so only pass buffer[ 0 ]
                         * containing merged output. */
                        (*cmdtab[ i ].func)( cmdtab[ i ].closure, rstat,
                            &time_info, cmdtab[ i ].command,
                            cmdtab[ i ].buffer[ 0 ] );

                        BJAM_FREE( cmdtab[ i ].buffer[ OUT ] );
                        cmdtab[ i ].buffer[ OUT ] = 0;
                        BJAM_FREE( cmdtab[ i ].buffer[ ERR ] );
                        cmdtab[ i ].buffer[ ERR ] = 0;
                        BJAM_FREE( cmdtab[ i ].command );
                        cmdtab[ i ].command = 0;
                        cmdtab[ i ].func = 0;
                        cmdtab[ i ].closure = 0;
                        cmdtab[ i ].start_time = 0;
                    }
                    else
                    {
                        printf( "unknown pid %d with errno = %d\n", pid, errno );
                        exit( EXITBAD );
                    }
                }
            }
        }
    }
    return 1;
}
/* Build an ID3DAdapter9 on top of the given DRM fd: probe the device with
 * pipe-loader, create the hardware (hal) screen from one of the driver
 * search paths, wrap it with a swrast screen so resources can be shared,
 * read out the PCI descriptor and instantiate the adapter.  On failure all
 * resources acquired so far are released and an error HRESULT is returned. */
static HRESULT WINAPI
drm_create_adapter( int fd, ID3DAdapter9 **ppAdapter )
{
    struct d3dadapter9drm_context *ctx = CALLOC_STRUCT(d3dadapter9drm_context);
    /* NULL entries (unset env vars) are skipped during the search below. */
    const char *search_paths[] = {
        getenv("D3D9_DRIVERS_PATH"),
        getenv("D3D9_DRIVERS_DIR"),
        PIPE_SEARCH_DIR
    };
    HRESULT hr;
    int n;

    if (!ctx)
        return E_OUTOFMEMORY;

    ctx->base.resource_from_present = drm_resource_from_present;
    ctx->base.destroy = drm_destroy;

    /* use pipe-loader to dlopen the drm driver matching this fd */
    if (!pipe_loader_drm_probe_fd(&ctx->dev, fd, FALSE)) {
        DBG("Failed to probe drm fd %d.\n", fd);
        FREE(ctx);
        close(fd);
        return D3DERR_DRIVERINTERNALERROR;
    }

    /* try each search path in turn until a hardware screen loads */
    ctx->base.hal = NULL;
    for (n = 0; n < Elements(search_paths); ++n) {
        if (search_paths[n]) {
            ctx->base.hal = pipe_loader_create_screen(ctx->dev, search_paths[n]);
            if (ctx->base.hal)
                break;
        }
    }
    if (!ctx->base.hal) {
        DBG("Unable to load requested driver.\n");
        pipe_loader_release(&ctx->dev, 1);
        FREE(ctx);
        return D3DERR_DRIVERINTERNALERROR;
    }

    /* wrap it to create a software screen that can share resources */
    ctx->base.ref = NULL;
    if (pipe_loader_sw_probe_wrapped(&ctx->swdev, ctx->base.hal)) {
        for (n = 0; n < Elements(search_paths); ++n) {
            if (!search_paths[n])
                continue;
            ctx->base.ref = pipe_loader_create_screen(ctx->swdev, search_paths[n]);
            if (ctx->base.ref)
                break;
        }
    }
    if (!ctx->base.ref) {
        /* Non-fatal: only software devices become unavailable. */
        DBG("Couldn't wrap drm screen to swrast screen. Software devices "
            "will be unavailable.\n");
    }

    /* read out PCI info */
    read_descriptor(&ctx->base, fd);

    /* create and return new ID3DAdapter9 */
    hr = NineAdapter9_new(&ctx->base, (struct NineAdapter9 **)ppAdapter);
    if (FAILED(hr)) {
        if (ctx->swdev)
            pipe_loader_release(&ctx->swdev, 1);
        pipe_loader_release(&ctx->dev, 1);
        FREE(ctx);
        return hr;
    }
    return D3D_OK;
}
/* Block until at least one running child command completes, then drain its
 * pipes, reap it with wait4() (collecting rusage-based cpu times), invoke
 * the completion callback and clear its cmdtab slot.  Also enforces
 * globs.timeout by SIGKILL-ing process groups that have exceeded their
 * allotted cpu time. */
void exec_wait()
{
    int finished = 0;

    /* Process children that signaled. */
    while ( !finished )
    {
        int i;
        struct timeval tv;
        struct timeval * ptv = NULL;
        int select_timeout = globs.timeout;

        /* Check for timeouts:
         *   - kill children that already timed out
         *   - decide how long until the next one times out
         */
        if ( globs.timeout > 0 )
        {
            struct tms buf;
            clock_t const current = times( &buf );
            for ( i = 0; i < globs.jobs; ++i )
                if ( cmdtab[ i ].pid )
                {
                    clock_t const consumed =
                        ( current - cmdtab[ i ].start_time ) / tps;
                    if ( consumed >= globs.timeout )
                    {
                        /* Kill the whole process group of the child. */
                        killpg( cmdtab[ i ].pid, SIGKILL );
                        cmdtab[ i ].exit_reason = EXIT_TIMEOUT;
                    }
                    else if ( globs.timeout - consumed < select_timeout )
                        select_timeout = globs.timeout - consumed;
                }

            /* If nothing else causes our select() call to exit, force it
             * after however long it takes for the next one of our child
             * processes to crossed its alloted processing time so we can
             * terminate it. */
            tv.tv_sec = select_timeout;
            tv.tv_usec = 0;
            ptv = &tv;
        }

        /* select() will wait for I/O on a descriptor, a signal, or timeout. */
        {
            /* disable child termination signals while in select */
            int ret;
            sigset_t sigmask;
            sigemptyset(&sigmask);
            sigaddset(&sigmask, SIGCHLD);
            sigprocmask(SIG_BLOCK, &sigmask, NULL);

            /* Retry on EINTR only; other errors fall through as ret == -1. */
            while ( ( ret = poll( wait_fds, WAIT_FDS_SIZE, select_timeout * 1000 ) ) == -1 )
                if ( errno != EINTR ) break;

            /* restore original signal mask by unblocking sigchld */
            sigprocmask(SIG_UNBLOCK, &sigmask, NULL);

            /* Timeout or error: re-run the timeout/kill pass above. */
            if ( ret <= 0 ) continue;
        }

        for ( i = 0; i < globs.jobs; ++i )
        {
            int out_done = 0;
            int err_done = 0;
            if ( GET_WAIT_FD( i )[ OUT ].revents )
                out_done = read_descriptor( i, OUT );

            if ( globs.pipe_action && ( GET_WAIT_FD( i )[ ERR ].revents ) )
                err_done = read_descriptor( i, ERR );

            /* If feof on either descriptor, we are done. */
            if ( out_done || err_done )
            {
                int pid;
                int status;
                int rstat;
                timing_info time_info;
                struct rusage cmd_usage;

                /* We found a terminated child process - our search is done. */
                finished = 1;

                /* Close the stream and pipe descriptors. */
                close_streams( i, OUT );
                if ( globs.pipe_action )
                    close_streams( i, ERR );

                /* Reap the child and release resources. */
                while ( ( pid = wait4( cmdtab[ i ].pid, &status, 0, &cmd_usage ) ) == -1 )
                    if ( errno != EINTR ) break;
                if ( pid != cmdtab[ i ].pid )
                {
                    err_printf( "unknown pid %d with errno = %d\n", pid, errno );
                    exit( EXITBAD );
                }

                /* Set reason for exit if not timed out. */
                if ( WIFEXITED( status ) )
                    cmdtab[ i ].exit_reason = WEXITSTATUS( status )
                        ? EXIT_FAIL
                        : EXIT_OK;

                {
                    /* Convert the child's rusage timevals to seconds. */
                    time_info.system = ((double)(cmd_usage.ru_stime.tv_sec)*1000000.0+(double)(cmd_usage.ru_stime.tv_usec))/1000000.0;
                    time_info.user   = ((double)(cmd_usage.ru_utime.tv_sec)*1000000.0+(double)(cmd_usage.ru_utime.tv_usec))/1000000.0;
                    timestamp_copy( &time_info.start, &cmdtab[ i ].start_dt );
                    timestamp_current( &time_info.end );
                }

                /* Drive the completion. */
                if ( interrupted() ) rstat = EXEC_CMD_INTR;
                else if ( status ) rstat = EXEC_CMD_FAIL;
                else rstat = EXEC_CMD_OK;

                /* Call the callback, may call back to jam rule land. */
                (*cmdtab[ i ].func)( cmdtab[ i ].closure, rstat, &time_info,
                    cmdtab[ i ].buffer[ OUT ], cmdtab[ i ].buffer[ ERR ],
                    cmdtab[ i ].exit_reason );

                /* Clean up the command's running commands table slot. */
                BJAM_FREE( cmdtab[ i ].buffer[ OUT ] );
                cmdtab[ i ].buffer[ OUT ] = 0;
                cmdtab[ i ].buf_size[ OUT ] = 0;
                BJAM_FREE( cmdtab[ i ].buffer[ ERR ] );
                cmdtab[ i ].buffer[ ERR ] = 0;
                cmdtab[ i ].buf_size[ ERR ] = 0;
                cmdtab[ i ].pid = 0;
                cmdtab[ i ].func = 0;
                cmdtab[ i ].closure = 0;
                cmdtab[ i ].start_time = 0;
            }
        }
    }
}
/*------------------------------------------------------------------ Disk management functions. These functions are not really a part of file system. They are provided for convenience in this emulated file system. ------------------------------------------------------------------ Restores the saved disk image in a file to the array. */ void FileSystem53::restore() { iosystem->restore(); // after restore // load all the result from the disk to the buffer // reload bytemap and desc_table char bytemap_buffer[B]; char desc_buffer[B]; iosystem->read_block(0, bytemap_buffer); iosystem->read_block(1, desc_buffer); for (int i = 0; i < B; i++) { //cout << "buffer[i] = " << i << " " << (int)bytemap_buffer[i] << endl; desc_table[0][i] = bytemap_buffer[i]; //cout << "desc_buffer[" << i << "] = " << (int)desc_buffer[i] << endl; } int start_pos = 0; for (int i = 1; i < MAX_FILE_NO+1; i++) { for (int j = 0; j < DESCR_SIZE; j++) { //desc_buffer_part[j] = desc_buffer[start_pos++]; desc_table[i][j] = desc_buffer[start_pos++]; //cout << "desc_buffer[" << start_pos << "] = " << (int)desc_buffer[i] << endl; } //cout << endl; } // load directory to OFT oft[0][0] = 1; char* dir_buffer_part = read_descriptor(1); for (int i = 0; i < DESCR_SIZE; i++) { oft[0][i+1] = dir_buffer_part[i]; //cout << "dir_buffer[" << i << "] = " << (int)oft[0][i+1] << endl; } oft[0][OFT_CURRENT_POSITION_INDEX] = 6; char filename_buffer[B]; start_pos = oft[0][OFT_CURRENT_POSITION_INDEX]; for (int i = 1; i < DESCR_SIZE; i++) { int block_index = oft[0][i+1]; if ( block_index == 0 ) { break; } iosystem->read_block(block_index, filename_buffer); for (int j = 0; j < B; j++) { if ( filename_buffer[j] == 0 ) break; oft[0][start_pos++] = filename_buffer[j]; } } //print_desc_table(); //print_oft(); }
/* Block until at least one running child command completes, then drain its
 * pipes, reap it with waitpid(), record cpu/wall timing, invoke the
 * completion callback and clear its cmdtab slot.  Also enforces
 * globs.timeout by SIGKILL-ing process groups that exceeded their allotted
 * cpu time, using the select() timeout to wake up in time for the next
 * expiry. */
void exec_wait()
{
    int finished = 0;

    /* Process children that signaled. */
    while ( !finished )
    {
        int i;
        struct timeval tv;
        struct timeval * ptv = NULL;
        int select_timeout = globs.timeout;

        /* Prepare file descriptor information for use in select(). */
        fd_set fds;
        int const fd_max = populate_file_descriptors( &fds );

        /* Check for timeouts:
         *   - kill children that already timed out
         *   - decide how long until the next one times out
         */
        if ( globs.timeout > 0 )
        {
            struct tms buf;
            clock_t const current = times( &buf );
            for ( i = 0; i < globs.jobs; ++i )
                if ( cmdtab[ i ].pid )
                {
                    clock_t const consumed =
                        ( current - cmdtab[ i ].start_time ) / tps;
                    if ( consumed >= globs.timeout )
                    {
                        /* Kill the child's whole process group. */
                        killpg( cmdtab[ i ].pid, SIGKILL );
                        cmdtab[ i ].exit_reason = EXIT_TIMEOUT;
                    }
                    else if ( globs.timeout - consumed < select_timeout )
                        select_timeout = globs.timeout - consumed;
                }

            /* If nothing else causes our select() call to exit, force it
             * after however long it takes for the next one of our child
             * processes to crossed its alloted processing time so we can
             * terminate it. */
            tv.tv_sec = select_timeout;
            tv.tv_usec = 0;
            ptv = &tv;
        }

        /* select() will wait for I/O on a descriptor, a signal, or timeout. */
        {
            int ret;
            /* Retry on EINTR only; other errors fall through as ret == -1. */
            while ( ( ret = select( fd_max + 1, &fds, 0, 0, ptv ) ) == -1 )
                if ( errno != EINTR ) break;
            /* Timeout or error: re-run the timeout/kill pass above. */
            if ( ret <= 0 ) continue;
        }

        for ( i = 0; i < globs.jobs; ++i )
        {
            int out_done = 0;
            int err_done = 0;
            if ( FD_ISSET( cmdtab[ i ].fd[ OUT ], &fds ) )
                out_done = read_descriptor( i, OUT );

            if ( globs.pipe_action && FD_ISSET( cmdtab[ i ].fd[ ERR ], &fds ) )
                err_done = read_descriptor( i, ERR );

            /* If feof on either descriptor, we are done. */
            if ( out_done || err_done )
            {
                int pid;
                int status;
                int rstat;
                timing_info time_info;

                /* We found a terminated child process - our search is done. */
                finished = 1;

                /* Close the stream and pipe descriptors. */
                close_streams( i, OUT );
                if ( globs.pipe_action )
                    close_streams( i, ERR );

                /* Reap the child and release resources. */
                while ( ( pid = waitpid( cmdtab[ i ].pid, &status, 0 ) ) == -1 )
                    if ( errno != EINTR ) break;
                if ( pid != cmdtab[ i ].pid )
                {
                    printf( "unknown pid %d with errno = %d\n", pid, errno );
                    exit( EXITBAD );
                }

                /* Set reason for exit if not timed out. */
                if ( WIFEXITED( status ) )
                    cmdtab[ i ].exit_reason = WEXITSTATUS( status )
                        ? EXIT_FAIL
                        : EXIT_OK;

                {
                    /* Child cpu times are deltas against the global old_time
                     * snapshot taken at the previous completion. */
                    struct tms new_time;
                    times( &new_time );
                    time_info.system = (double)( new_time.tms_cstime -
                        old_time.tms_cstime ) / CLOCKS_PER_SEC;
                    time_info.user = (double)( new_time.tms_cutime -
                        old_time.tms_cutime ) / CLOCKS_PER_SEC;
                    timestamp_copy( &time_info.start, &cmdtab[ i ].start_dt );
                    timestamp_current( &time_info.end );
                    old_time = new_time;
                }

                /* Drive the completion. */
                if ( interrupted() ) rstat = EXEC_CMD_INTR;
                else if ( status ) rstat = EXEC_CMD_FAIL;
                else rstat = EXEC_CMD_OK;

                /* Call the callback, may call back to jam rule land. */
                (*cmdtab[ i ].func)( cmdtab[ i ].closure, rstat, &time_info,
                    cmdtab[ i ].buffer[ OUT ], cmdtab[ i ].buffer[ ERR ],
                    cmdtab[ i ].exit_reason );

                /* Clean up the command's running commands table slot. */
                BJAM_FREE( cmdtab[ i ].buffer[ OUT ] );
                cmdtab[ i ].buffer[ OUT ] = 0;
                cmdtab[ i ].buf_size[ OUT ] = 0;
                BJAM_FREE( cmdtab[ i ].buffer[ ERR ] );
                cmdtab[ i ].buffer[ ERR ] = 0;
                cmdtab[ i ].buf_size[ ERR ] = 0;
                cmdtab[ i ].pid = 0;
                cmdtab[ i ].func = 0;
                cmdtab[ i ].closure = 0;
                cmdtab[ i ].start_time = 0;
            }
        }
    }
}
/* Create an ID3DAdapter9 for a DRM device fd (modern pipe-loader variant).
 * Takes ownership of the fd, creates the hardware screen, checks the
 * PIPE_CAP_DMABUF capability, applies the driconf "nine" options, wraps the
 * hal screen in a software screen for resource sharing, reads the PCI
 * descriptor (with optional vendor-id override) and instantiates the
 * adapter.  Returns D3D_OK or an error HRESULT after releasing acquired
 * resources. */
static HRESULT WINAPI drm_create_adapter( int fd, ID3DAdapter9 **ppAdapter )
{
    struct d3dadapter9drm_context *ctx = CALLOC_STRUCT(d3dadapter9drm_context);
    HRESULT hr;
    bool different_device;
    driOptionCache defaultInitOptions;
    driOptionCache userInitOptions;
    /* -2 is the "not set by the user" sentinel for throttle_value. */
    int throttling_value_user = -2;
    /* -1 means "no override" for the PCI vendor id. */
    int override_vendorid = -1;

    if (!ctx) { return E_OUTOFMEMORY; }

    ctx->base.destroy = drm_destroy;

    /* Although the fd is provided from external source, mesa/nine
     * takes ownership of it. */
    fd = loader_get_user_preferred_fd(fd, &different_device);
    ctx->fd = fd;
    ctx->base.linear_framebuffer = different_device;

    if (!pipe_loader_drm_probe_fd(&ctx->dev, fd)) {
        ERR("Failed to probe drm fd %d.\n", fd);
        FREE(ctx);
        close(fd);
        return D3DERR_DRIVERINTERNALERROR;
    }

    ctx->base.hal = pipe_loader_create_screen(ctx->dev);
    if (!ctx->base.hal) {
        ERR("Unable to load requested driver.\n");
        drm_destroy(&ctx->base);
        return D3DERR_DRIVERINTERNALERROR;
    }

    /* dma-buf sharing is mandatory for nine. */
    if (!ctx->base.hal->get_param(ctx->base.hal, PIPE_CAP_DMABUF)) {
        ERR("The driver is not capable of dma-buf sharing."
            "Abandon to load nine state tracker\n");
        drm_destroy(&ctx->base);
        return D3DERR_DRIVERINTERNALERROR;
    }

    /* Previously was set to PIPE_CAP_MAX_FRAMES_IN_FLIGHT,
     * but the change of value of this cap to 1 seems to cause
     * regressions. */
    ctx->base.throttling_value = 2;
    ctx->base.throttling = ctx->base.throttling_value > 0;

    /* User driconf options override the defaults set above. */
    driParseOptionInfo(&defaultInitOptions, __driConfigOptionsNine);
    driParseConfigFiles(&userInitOptions, &defaultInitOptions, 0, "nine", NULL);
    if (driCheckOption(&userInitOptions, "throttle_value", DRI_INT)) {
        throttling_value_user = driQueryOptioni(&userInitOptions, "throttle_value");
        if (throttling_value_user == -1)
            ctx->base.throttling = FALSE;
        else if (throttling_value_user >= 0) {
            ctx->base.throttling = TRUE;
            ctx->base.throttling_value = throttling_value_user;
        }
    }

    if (driCheckOption(&userInitOptions, "vblank_mode", DRI_ENUM))
        ctx->base.vblank_mode = driQueryOptioni(&userInitOptions, "vblank_mode");
    else
        ctx->base.vblank_mode = 1;

    /* thread_submit defaults to on when rendering on a different GPU than
     * the server's. */
    if (driCheckOption(&userInitOptions, "thread_submit", DRI_BOOL))
        ctx->base.thread_submit = driQueryOptionb(&userInitOptions, "thread_submit");
    else
        ctx->base.thread_submit = different_device;

    if (driCheckOption(&userInitOptions, "override_vendorid", DRI_INT)) {
        override_vendorid = driQueryOptioni(&userInitOptions, "override_vendorid");
    }

    if (driCheckOption(&userInitOptions, "discard_delayed_release", DRI_BOOL))
        ctx->base.discard_delayed_release = driQueryOptionb(&userInitOptions, "discard_delayed_release");
    else
        ctx->base.discard_delayed_release = TRUE;

    if (driCheckOption(&userInitOptions, "tearfree_discard", DRI_BOOL))
        ctx->base.tearfree_discard = driQueryOptionb(&userInitOptions, "tearfree_discard");
    else
        ctx->base.tearfree_discard = FALSE;

    /* tearfree_discard is only valid on top of delayed release. */
    if (ctx->base.tearfree_discard && !ctx->base.discard_delayed_release) {
        ERR("tearfree_discard requires discard_delayed_release\n");
        ctx->base.tearfree_discard = FALSE;
    }

    if (driCheckOption(&userInitOptions, "csmt_force", DRI_INT))
        ctx->base.csmt_force = driQueryOptioni(&userInitOptions, "csmt_force");
    else
        ctx->base.csmt_force = -1;

    if (driCheckOption(&userInitOptions, "dynamic_texture_workaround", DRI_BOOL))
        ctx->base.dynamic_texture_workaround = driQueryOptionb(&userInitOptions, "dynamic_texture_workaround");
    else
        ctx->base.dynamic_texture_workaround = FALSE;

    if (driCheckOption(&userInitOptions, "shader_inline_constants", DRI_BOOL))
        ctx->base.shader_inline_constants = driQueryOptionb(&userInitOptions, "shader_inline_constants");
    else
        ctx->base.shader_inline_constants = FALSE;

    driDestroyOptionCache(&userInitOptions);
    driDestroyOptionInfo(&defaultInitOptions);

    /* wrap it to create a software screen that can share resources */
    if (pipe_loader_sw_probe_wrapped(&ctx->swdev, ctx->base.hal))
        ctx->base.ref = pipe_loader_create_screen(ctx->swdev);

    if (!ctx->base.ref) {
        /* Non-fatal: only software devices become unavailable. */
        ERR("Couldn't wrap drm screen to swrast screen. Software devices "
            "will be unavailable.\n");
    }

    /* read out PCI info */
    read_descriptor(&ctx->base, fd, override_vendorid);

    /* create and return new ID3DAdapter9 */
    hr = NineAdapter9_new(&ctx->base, (struct NineAdapter9 **)ppAdapter);
    if (FAILED(hr)) {
        drm_destroy(&ctx->base);
        return hr;
    }

    return D3D_OK;
}