Example no. 1
int gpuvm_pre_init(int flags) {
	if(flags != GPUVM_THREADS_BEFORE_INIT && 
		 flags != GPUVM_THREADS_AFTER_INIT) {
		fprintf(stderr, "gpuvm_pre_init: invalid flags\n");
		return GPUVM_EARG;
	}
	if(flags == GPUVM_THREADS_BEFORE_INIT) {
		// case before OpenCL initialization
		if(pre_runtime_threads_g)	{		
			free(pre_runtime_threads_g);
			pre_runtime_threads_g = 0;
			pre_runtime_nthreads_g = 0;
		}
		int pre_runtime_nthreads = get_threads(&pre_runtime_threads_g);
		if(pre_runtime_nthreads < 0)
			return -1;
		pre_runtime_nthreads_g = pre_runtime_nthreads;
		//fprintf(stderr, "%d threads before OpenCL runtime init\n", 
		//				pre_runtime_nthreads_g);
		return 0;
	} else if(flags == GPUVM_THREADS_AFTER_INIT) {
		// case after OpenCL initialization
		if(!pre_runtime_threads_g) {
			fprintf(stderr, "gpuvm_pre_init: list of threads must be recorded "
							"first\n");
			return GPUVM_ESTATE;			
		}
		thread_t *after_runtime_threads, *diff_threads;
		// get new threads
		int after_runtime_nthreads = get_threads(&after_runtime_threads);
		if(after_runtime_nthreads < 0)
			return after_runtime_nthreads;
		//fprintf(stderr, "%d threads after OpenCL runtime init\n", 
		//				after_runtime_nthreads);
		// get difference
		int diff_nthreads = threads_diff
			(&diff_threads, after_runtime_threads, after_runtime_nthreads,
			 pre_runtime_threads_g, pre_runtime_nthreads_g);
		if(diff_nthreads < 0) {
			free(after_runtime_threads);
			return diff_nthreads;
		}
		// save difference as "immune threads"
		if(diff_nthreads > MAX_NTHREADS) {
			fprintf(stderr, "gpuvm_pre_init: too many immune threads\n");
			free(after_runtime_threads);
			free(diff_threads);
			return -1;
		}
		immune_nthreads_g = diff_nthreads;
		//fprintf(stderr, "immune_nthreads_g = %d\n", immune_nthreads_g);
		memcpy(immune_threads_g, diff_threads, immune_nthreads_g * sizeof(thread_t));
		free(after_runtime_threads);
		free(diff_threads);
		return 0;
	}  // if(flags == ...)
}
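
The two-phase calling pattern this function expects is easier to see from a caller. A minimal usage sketch, assuming only the flags and the zero/negative return convention shown above; the wrapper name and the OpenCL setup step in the middle are placeholders:

int init_with_thread_tracking(void)
{
	/* record the threads that exist before the OpenCL runtime starts */
	if (gpuvm_pre_init(GPUVM_THREADS_BEFORE_INIT) != 0)
		return -1;

	/* ... create the OpenCL context and command queues here ... */

	/* diff against the new thread list; runtime-created threads become "immune" */
	if (gpuvm_pre_init(GPUVM_THREADS_AFTER_INIT) != 0)
		return -1;
	return 0;
}
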
Example no. 2
int backtrace_ptrace(int pid, int *tids, int *index, int nr_tids)
{
#if !defined (NO_LIBUNWIND_PTRACE)
    int i, count, rc = 0;
    int *threads = NULL;

    count = get_threads(pid, &threads);
    if (!count || threads == NULL)
        return -1;

    if (tids != NULL) {
        if (adjust_threads(threads, count, tids, index, nr_tids) < 0) {
            free(threads);
            return -1;
        }

        free(threads);
        count = nr_tids;
        threads = tids;
    }

    if (attach_process(pid) < 0)
        return -1;

    for (i = 0; i < count; ++i) {
        void *upt_info;

        printf("--------------------  thread %d (%d)  --------------------\n",
               (index != NULL ? index[i] : i+1), threads[i]);

        if (threads[i] != pid && attach_thread(threads[i]) < 0) {
            rc = -1;
            break;
        }

        upt_info = _UPT_create(threads[i]);

        if (backtrace_thread(&_UPT_accessors, upt_info) < 0)
            rc = -1;

        _UPT_destroy(upt_info);

        if (threads[i] != pid && detach_thread(threads[i]))
            rc = -1;
        if (rc < 0)
            break;
    }

    free(threads);

    if (detach_process(pid) < 0)
        return -1;

    return rc;

#else
    return -1;
#endif /* NO_LIBUNWIND_PTRACE */
}
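
Examples 2, 6, 7 and 8 all rely on a get_threads(pid, &tids) helper that is not shown. A minimal sketch of the contract the callers appear to assume (enumerate /proc/<pid>/task, return the number of thread ids found or a negative value on error); the helper in the real source tree may look different:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

static int get_threads_sketch(int pid, int **tids)
{
    char path[64];
    DIR *dir;
    struct dirent *entry;
    int *list = NULL, count = 0, capacity = 0;

    snprintf(path, sizeof(path), "/proc/%d/task", pid);
    if ((dir = opendir(path)) == NULL)
        return -1;

    while ((entry = readdir(dir)) != NULL) {
        /* task entries are numeric; this also skips "." and ".." */
        if (entry->d_name[0] < '0' || entry->d_name[0] > '9')
            continue;
        if (count == capacity) {
            capacity = capacity ? capacity * 2 : 8;
            int *tmp = realloc(list, capacity * sizeof(int));
            if (tmp == NULL) {
                free(list);
                closedir(dir);
                return -1;
            }
            list = tmp;
        }
        list[count++] = atoi(entry->d_name);
    }
    closedir(dir);

    *tids = list;
    return count;
}
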
Example no. 3
void mp_manager::check_threads(section* pSection)
{
	// atomically read the thread currently bound to this section, if any
	volatile thread* pBound;
	InterlockedExchangePointer(&pBound, pSection->m_pbound_thread);
	if (pBound)
	{
		// a thread is bound: invoke its kick procedure if it has one,
		// otherwise wake it directly
		volatile KickProc pKick;
		InterlockedExchangePointer(&pKick, pBound->m_pKickProc);
		if (pKick)
		{
			(*pKick)((thread*) pBound, (section*) pBound->m_pexclusive_section);
		}
		else
		{
			SetEvent(pBound->m_handle);
		}
	}
	else
	{
		// no bound thread: look at the first thread that is not waiting on an
		// exclusive section and wake it if it is sleeping
		for (size_t i = 0; i < get_threads().size(); i++)
		{
			thread* next = get_threads()[i];
			//volatile KickProc pKick;
			volatile section* pExclusive;
			//InterlockedExchangePointer(&pKick, next->m_pKickProc);
			InterlockedExchangePointer(&pExclusive, next->m_pexclusive_section);
			if (pExclusive) continue;
			if (InterlockedCompareExchange(&next->m_sleeping, 0, 0))
			{
				//printf("Kicking thread  %d", next->m_id);
				SetEvent(next->m_handle);
			}
			break;
		}
	}
}
Example no. 4
int get_physical_cores()
{
  int regs[4];
  boost::simd::config::x86::cpuid(regs, 0x00000000);
  if(boost::simd::config::x86::get_vendor() == boost::simd::config::x86::intel)
  {
    return (get_logical_cores()/get_threads());
  }
  else if(boost::simd::config::x86::get_vendor() == boost::simd::config::x86::amd)
  {
    boost::simd::config::x86::cpuid(regs, 0x80000008);
    return (get_byte(regs[2], 0) + 1);
  }
  else return -1;
}
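
The AMD branch relies on a get_byte() helper that is not shown. The assumption made here is that it extracts byte n (little-endian) of a 32-bit register value, so get_byte(regs[2], 0) is ECX[7:0] of CPUID leaf 0x80000008 ("NC"), and NC + 1 is the core count on AMD parts. A sketch under that assumption; the actual boost.simd helper may differ:

static inline int get_byte_sketch(int reg, int n)
{
  /* assumed semantics: byte n of the register value */
  return (int)(((unsigned int)reg >> (8 * n)) & 0xFFu);
}
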
Example no. 5
int main(int argc, char *argv[])
{
	DWORD dwPid = 7668;//GetCurrentProcessId();
	HANDLE hProcess = OpenProcess(PROCESS_ALL_ACCESS, FALSE, dwPid);
	 
	for (auto region : get_regions(hProcess))
		print_range((ULONG_PTR)region.BaseAddress, (ULONG_PTR)region.BaseAddress + region.RegionSize); 

	printf("\nDLLs:\n"); 

	for (auto module : get_modules(hProcess))
		print_range((ULONG_PTR)module.lpBaseOfDll, (ULONG_PTR)module.lpBaseOfDll + module.SizeOfImage);

	printf("\nThreads:\n");
	for (auto thread : get_threads(dwPid))
		print_range((ULONG_PTR)thread.Eip, (ULONG_PTR)thread.Esp);

	getchar();
	return 0;
}
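
The module loop above assumes get_modules() yields MODULEINFO records, whose lpBaseOfDll and SizeOfImage fields come from GetModuleInformation. A minimal sketch of that enumeration under this assumption; the project's real helper and its container type may differ:

#include <windows.h>
#include <psapi.h>
#include <stdio.h>

/* print base..base+size for every module of hProcess */
static void print_module_ranges(HANDLE hProcess)
{
	HMODULE modules[1024];
	DWORD cbNeeded = 0;

	if (!EnumProcessModules(hProcess, modules, sizeof(modules), &cbNeeded))
		return;

	for (DWORD i = 0; i < cbNeeded / sizeof(HMODULE); ++i) {
		MODULEINFO info;
		if (GetModuleInformation(hProcess, modules[i], &info, sizeof(info)))
			printf("%p - %p\n", info.lpBaseOfDll,
			       (void *)((ULONG_PTR)info.lpBaseOfDll + info.SizeOfImage));
	}
}
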
Example no. 6
/*
 * save process' memory maps, stack contents, thread identifiers and registers
 */
struct snapshot *get_snapshot(int pid, int *tids, int *index, int nr_tids)
{
    struct snapshot *res;
    int attached_tid = 0;
    int i, n_frames;
    long page, label, rc;
    struct mem_data_chunk **stacks_cover = NULL;

    if ((page = sysconf(_SC_PAGESIZE)) < 0) {
        perror("get pagesize");
        return NULL;
    }
    --page;

    res = calloc(1, sizeof(struct snapshot));
    if (res == NULL) {
        perror("calloc");
        return NULL;
    }

    /*
     * create memory_map structure corresponding to process' maps
     */
    res->map = create_maps(pid);
    if (res->map == NULL)
        goto get_snapshot_fail;

    /*
     * get process' threads
     */
    res->num_threads = get_threads(pid, &res->tids);
    if (res->num_threads < 0 || res->tids == NULL)
        goto get_snapshot_fail;

    /*
     * user-provided list of threads
     */
    if (tids != NULL) {
        if (adjust_threads(res->tids, res->num_threads, tids, index, nr_tids) < 0)
            goto get_snapshot_fail;

        free(res->tids);
        res->num_threads = nr_tids;
        res->tids = tids;
    }

    res->cur_thr = 0;

    res->regs = malloc(sizeof(res->regs[0])*res->num_threads);
    if (res->regs == NULL) {
        perror("malloc");
        goto get_snapshot_fail;
    }

    /* FREEZE PROCESS */
    if (attach_process(pid) < 0)
        goto get_snapshot_fail;

    for (i = 0; i < res->num_threads; ++i) {
        struct iovec iov;

        /*
         * we have already attached to main thread. call attach_thread()
         * for other ones
         */
        attached_tid = res->tids[i];
        if (res->tids[i] != pid && attach_thread(res->tids[i]) < 0)
            goto get_snapshot_fail_attached;

        /*
         * save thread's registers
         */
        iov.iov_len = sizeof(res->regs[0]);
        iov.iov_base = &res->regs[i];
        rc = ptrace(PTRACE_GETREGSET, res->tids[i], NT_PRSTATUS, &iov);
        if (rc < 0) {
            perror("PTRACE_GETREGSET");
            goto get_snapshot_fail_attached;
        }

        /*
         * save a label on the memory region. it indicates that the memory contents
         * above this point (%rsp) will be needed to unwind the stacks
         */
        label = SP_REG(&res->regs[i]) & ~page;
        rc = mem_map_add_label(res->map, (void *)label, res->num_threads);

        if (rc < 0) {
            fprintf(stderr, "failed to add label 0x%lx [rsp 0x%llx thread %d]\n",
                    label, (long long unsigned int)SP_REG(&res->regs[i]), res->tids[i]);
            goto get_snapshot_fail_attached;
        }

        /*
         * detach from thread. it will still be frozen due to SIGSTOP
         */
        if (res->tids[i] != pid && detach_thread(res->tids[i]) < 0)
            goto get_snapshot_fail_attached;
    }

    /*
     * arrange data chunks to copy memory contents. in most cases the chunks
     * will start from %rsp pointing somewhere in thread's stack
     * to the end of the stack region
     */
    stacks_cover = malloc(sizeof(struct mem_data_chunk*) * res->num_threads);
    if (stacks_cover == NULL) {
        perror("malloc");
        goto get_snapshot_fail_attached;
    }

    n_frames = mem_map_build_label_cover(res->map, stack_size,
            stacks_cover, page + 1);

    /*
     * copy memory contents
     */
    rc = copy_memory(pid, stacks_cover, n_frames);

    if (rc < 0)
        goto get_snapshot_fail_attached;

    /* UNFREEZE PROCESS */
    if (detach_process(pid) < 0)
        goto get_snapshot_fail;

    if (opt_verbose) {
        for (i = 0; i < n_frames; ++i) {
            struct mem_data_chunk *chunk = stacks_cover[i];
            printf("chunk #%d: 0x%lx-0x%lx length: %ldK\n",
                    i, (size_t)chunk->start,
                    (size_t)chunk->start + chunk->length,
                    chunk->length >> 10);
        }
    }

    free(stacks_cover);

    return res;

get_snapshot_fail_attached:
    if (attached_tid)
        detach_thread(attached_tid);

    detach_process(pid);

get_snapshot_fail:
    if (opt_verbose) {
        fprintf(stderr, "maps of %d:\n", pid);
        print_proc_maps(pid);
    }

    free(stacks_cover);
    snapshot_destroy(res);
    return NULL;
}
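
Example 6 reads each thread's registers with PTRACE_GETREGSET / NT_PRSTATUS into an architecture-specific buffer and then pulls the stack pointer out through an SP_REG() macro that is not shown. A hedged sketch of how such a macro might be defined on common targets; the project's real definition may differ:

#include <sys/user.h>

/* assumed stack-pointer accessor, taking a struct user_regs_struct pointer */
#if defined(__x86_64__)
#define SP_REG(regs) ((regs)->rsp)
#elif defined(__i386__)
#define SP_REG(regs) ((regs)->esp)
#elif defined(__aarch64__)
#define SP_REG(regs) ((regs)->sp)
#endif
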
Example no. 7
int backtrace_ptrace(int pid, int *tids, int *index, int nr_tids)
{
#if !defined (NO_LIBUNWIND_PTRACE)
    int i, count, rc = 0;
    int *threads = NULL;

    count = get_threads(pid, &threads);
    if (!count || threads == NULL)
        return -1;

    if (tids != NULL) {
        if (adjust_threads(threads, count, tids, index, nr_tids) < 0) {
            free(threads);
            return -1;
        }

        free(threads);
        count = nr_tids;
        threads = tids;
    }

    if (attach_process(pid) < 0)
        return -1;

    for (i = 0; i < count; ++i) {
        void *upt_info;
        int x;
        char comm[16];
        char end_pad[25] = "------------------------";

        x = get_thread_comm(threads[i], comm, sizeof(comm));

        if (x > 0 && x <= sizeof(end_pad))
        {
            end_pad[sizeof(end_pad) - x] = '\0';
            printf("-------------- thread %d (%d) (%s) %s\n", (index != NULL ? index[i] : i + 1), threads[i], comm, end_pad);
        }

        if (threads[i] != pid && attach_thread(threads[i]) < 0) {
            rc = -1;
            break;
        }

        upt_info = _UPT_create(threads[i]);

        if (backtrace_thread(&_UPT_accessors, upt_info) < 0)
            rc = -1;

        _UPT_destroy(upt_info);

        if (threads[i] != pid && detach_thread(threads[i]))
            rc = -1;
        if (rc < 0)
            break;
    }

    free(threads);

    if (detach_process(pid) < 0)
        return -1;

    return rc;

#else
    return -1;
#endif /* NO_LIBUNWIND_PTRACE */
}
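
The only difference from Example 2 is the per-thread header line, which uses a get_thread_comm() helper that is not shown. A minimal sketch of the contract the caller appears to assume (read /proc/<tid>/comm, return the name length or a negative value on error); the real helper may differ:

#include <stdio.h>
#include <string.h>

static int get_thread_comm_sketch(int tid, char *buf, size_t size)
{
    char path[64];
    FILE *f;

    snprintf(path, sizeof(path), "/proc/%d/comm", tid);
    if ((f = fopen(path, "r")) == NULL)
        return -1;
    if (fgets(buf, (int)size, f) == NULL) {
        fclose(f);
        return -1;
    }
    fclose(f);
    buf[strcspn(buf, "\n")] = '\0';   /* strip the trailing newline */
    return (int)strlen(buf);
}
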
Example no. 8
/*
 * save process' memory maps, stack contents, thread identifiers and registers
 */
struct snapshot *get_snapshot(int pid, int *tids, int *index, int nr_tids)
{
    struct snapshot *res;
    int i, attached_tid = 0, n_frames;
    long page, label, rc;
    struct mem_data_chunk **stacks_cover = NULL;
    int v_major, v_minor;
    int use_process_vm_readv = 0;

    if ((page = sysconf(_SC_PAGESIZE)) < 0) {
        perror("get pagesize");
        return NULL;
    }
    --page;

    res = calloc(1, sizeof(struct snapshot));
    if (res == NULL) {
        perror("calloc");
        return NULL;
    }

    /*
     * create memory_map structure corresponding to process' maps
     */
    res->map = create_maps(pid);
    if (res->map == NULL)
        goto get_snapshot_fail;

    /*
     * get process' threads
     */
    res->num_threads = get_threads(pid, &res->tids);
    if (res->num_threads < 0 || res->tids == NULL)
        goto get_snapshot_fail;

    /*
     * user-provided list of threads
     */
    if (tids != NULL) {
        if (adjust_threads(res->tids, res->num_threads, tids, index, nr_tids) < 0)
            goto get_snapshot_fail;

        free(res->tids);
        res->num_threads = nr_tids;
        res->tids = tids;
    }

    res->cur_thr = 0;

    res->regs = malloc(sizeof(struct user_regs_struct)*res->num_threads);
    if (res->regs == NULL) {
        perror("malloc");
        goto get_snapshot_fail;
    }

    /*
     * decide how to copy the memory contents of the process. on newer kernels
     * process_vm_readv() is used by default; on older kernels, or when the
     * option --proc-mem is specified, the file /proc/<pid>/mem is read
     */
    if (!opt_proc_mem) {
        if (get_kernel_version(&v_major, &v_minor) < 0)
            goto get_snapshot_fail;
        if (((v_major << 16) | v_minor) >= 0x30002)
            use_process_vm_readv = 1;
    } else {
        use_process_vm_readv = 0;
    }

    /* FREEZE PROCESS */
    if (attach_process(pid) < 0)
        goto get_snapshot_fail;

    for (i = 0; i < res->num_threads; ++i) {
        /*
         * we have already attached to main thread. call attach_thread()
         * for other ones
         */
        attached_tid = res->tids[i];
        if (res->tids[i] != pid && attach_thread(res->tids[i]) < 0)
            goto get_snapshot_fail_attached;

        /*
         * save thread's registers
         */
        rc = ptrace(PTRACE_GETREGS, res->tids[i], NULL, &res->regs[i]);
        if (rc < 0) {
            perror("PTRACE_GETREGS");
            goto get_snapshot_fail_attached;
        }

        /*
         * save a label on the memory region. it indicates that the memory contents
         * above this point (%rsp) will be needed to unwind the stacks
         */
        label = res->regs[i].rsp & ~page;
        rc = mem_map_add_label(res->map, (void *)label, res->num_threads);

        if (rc < 0) {
            fprintf(stderr, "failed to add label 0x%lx [rsp 0x%lx thread %d]\n",
                    label, res->regs[i].rsp, res->tids[i]);
            goto get_snapshot_fail_attached;
        }

        /*
         * detach from thread. it will still be frozen due to SIGSTOP
         */
        if (res->tids[i] != pid && detach_thread(res->tids[i]) < 0)
            goto get_snapshot_fail_attached;
    }

    /*
     * arrange data chunks to copy memory contents. in most cases the chunks
     * will start from %rsp pointing somewhere in thread's stack
     * to the end of the stack region
     */
    stacks_cover = malloc(sizeof(struct mem_data_chunk*) * res->num_threads);
    if (stacks_cover == NULL) {
        perror("malloc");
        goto get_snapshot_fail_attached;
    }

    n_frames = mem_map_build_label_cover(res->map, stack_size,
            stacks_cover, page + 1);

    /*
     * copy memory contents
     */
    rc = use_process_vm_readv ?
        copy_memory_process_vm_readv(pid, stacks_cover, n_frames) :
        copy_memory_proc_mem(pid, stacks_cover, n_frames);

    if (rc < 0)
        goto get_snapshot_fail_attached;

    /* UNFREEZE PROCESS */
    if (detach_process(pid) < 0)
        goto get_snapshot_fail;

    if (opt_verbose) {
        for (i = 0; i < n_frames; ++i) {
            struct mem_data_chunk *chunk = stacks_cover[i];
            printf("chunk #%d: 0x%lx-0x%lx length: %ldK\n",
                    i, (size_t)chunk->start,
                    (size_t)chunk->start + chunk->length,
                    chunk->length >> 10);
        }
    }

    free(stacks_cover);

    return res;

get_snapshot_fail_attached:
    if (attached_tid)
        detach_thread(attached_tid);

    detach_process(pid);

get_snapshot_fail:
    if (opt_verbose) {
        fprintf(stderr, "maps of %d:\n", pid);
        print_proc_maps(pid);
    }

    free(stacks_cover);
    snapshot_destroy(res);
    return NULL;
}
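
The kernel version gate above ((3 << 16) | 2 = 0x30002) is there because process_vm_readv() first appeared in Linux 3.2. A hedged sketch of what copying a single chunk with it boils down to; the project's copy_memory_process_vm_readv() will differ in how it batches chunks and handles partial reads:

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

/* copy one remote memory range into a local buffer in a single syscall */
static ssize_t copy_chunk(pid_t pid, void *local_buf,
                          void *remote_addr, size_t length)
{
    struct iovec local  = { .iov_base = local_buf,   .iov_len = length };
    struct iovec remote = { .iov_base = remote_addr, .iov_len = length };

    return process_vm_readv(pid, &local, 1, &remote, 1, 0);
}
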