/* * The calling thread requests to create a user-level thread that runs the * func. The context of function should be properly created and stored on the * ready queue for execution. The function returns 0 on success and -1 otherwise */ int uthread_create(void (*func)()) { sem_wait(&mutex); if(initialized == 0) { fprintf(stderr, "Error, system needs to be initialized \n"); sem_post(&mutex); return -1; } sem_post(&mutex); //http://linux.die.net/man/3/makecontext struct thread* t = initializeThread(); makecontext(t->context, func, 0); sem_wait(&mutex); if(num_running_threads == 0) { num_running_threads++; CALLING_THREAD = t; sem_post(&mutex); clone(doSetContext, malloc(16384), CLONE_VM, t->context); free(t); } else { add(t); sem_post(&mutex); } return 0; }
/*
 * Create a new user-level thread that will run start_funct(args).
 * The new thread is recorded as a child of the currently running thread
 * and placed on the ready queue so the scheduler can pick it up.
 * Returns an opaque handle to the new thread.
 */
void *MyThreadCreate (void (*start_funct)(void *), void *args)
{
    Thread *child = initializeThread(start_funct, args);

    /* Make the thread eligible for scheduling. */
    insertIntoQueue(readyQueue, child);

    /* Link it into the parent/child hierarchy of the current thread. */
    insertIntoQueue(currentThread->children, child);
    child->parent = currentThread;

    return (void *)child;
}
/*
 * Initialize the user-level threading package and run start_funct(args)
 * as the first thread. The invoking process's context is saved in
 * initProcesssContext so control can eventually be switched back to it,
 * then execution transfers to the newly created thread via swapcontext.
 */
void MyThreadInit(void(*start_funct)(void *), void *args)
{
    initProcesssContext = (ucontext_t *)malloc(sizeof(ucontext_t));
    if (initProcesssContext == NULL) {
        /* BUG FIX: the original dereferenced this unchecked pointer in
         * swapcontext(); without a saved context we can never switch
         * back, so fail loudly instead of invoking UB. */
        fprintf(stderr, "MyThreadInit: out of memory\n");
        exit(EXIT_FAILURE);
    }
    readyQueue = createAndInitializeQueue();
    blockedQueue = createAndInitializeQueue();
    currentThread = initializeThread(start_funct, args);
    initThread = currentThread;
    /* Save the caller's context and start running the first thread. */
    swapcontext(initProcesssContext, &(currentThread->uctxt));
    return;
}
/* * This function has to be called before any other uthread library function. * Initializes the uthread system. Should maintain data structure of a ready * queue, number of currently running kernel threads (should not exceed 1), and * the number of processes that are currently waiting for the I/O operation */ void system_init() { if(initialized == 1) { fprintf(stderr, "Error, system has been initialized already \n"); exit(EXIT_FAILURE); } else { sem_init(&mutex, 0, 1); sem_wait(&mutex); initialized = 1; num_running_threads = 0; rq_size = 0; num_io_threads = 0; HEAD = NULL; TAIL = NULL; CALLING_THREAD = initializeThread(); CALLING_THREAD->type = 0; sem_post(&mutex); } }
/* * The function should be called right after it finishes I/O operations. We * assume that when this function is called, the state of the caling process is * switched from the waiting state to the ready state. It should save the * context of current thread and put it in the ready queue. Note that the kernel * thread it is currently associated with needs to be terminated after this * function is called, because its kernel thread is only for initiating I/O and * waiting for the I/O to be completed. This function returns 0 on success and * -1 otherwise */ int uthread_endIO() { sem_wait(&mutex); if(initialized == 0) { fprintf(stderr, "Error, system needs to be initialized \n"); sem_post(&mutex); return -1; } sem_post(&mutex); struct thread* t = initializeThread(); t->type = 1; sem_wait(&mutex); add(t); num_io_threads--; sem_post(&mutex); return 0; }
/*
 * ffi closure entry point: dispatch a native callback into the JVM.
 * Ensures the current native thread is attached to the VM (attaching it,
 * possibly as a daemon, when the callback carries an initializer that
 * configures attach options), records per-thread state in thread-local
 * storage, invokes the Java-side callback inside its own local reference
 * frame, and finally detaches the thread unless the TLS flags say the
 * thread should stay attached.
 */
static void dispatch_callback(ffi_cif* cif, void* resp, void** cbargs, void* user_data) {
  callback* cb = ((callback *)user_data);
  JavaVM* jvm = cb->vm;
  JNIEnv* env = NULL;
  /* GetEnv succeeds only if this native thread is already attached. */
  int was_attached = (*jvm)->GetEnv(jvm, (void *)&env, JNI_VERSION_1_4) == JNI_OK;
  /* Default: detach on exit only if we perform the attach ourselves. */
  jboolean needs_detach = was_attached ? JNI_FALSE : JNI_TRUE;
  thread_storage* tls = was_attached ? get_thread_storage(env) : NULL;
  if (!was_attached) {
    int attach_status = 0;
    JavaVMAttachArgs args;
    int daemon = JNI_FALSE;
    args.version = JNI_VERSION_1_2;
    args.name = NULL;
    args.group = NULL;
    if (cb->behavior_flags & CB_HAS_INITIALIZER) {
      /* Let the callback's initializer choose thread name, group,
       * daemon status, and detach behavior for the attach. */
      AttachOptions options;
      options.daemon = JNI_FALSE; // default non-daemon
      options.detach = JNI_TRUE; // default detach behavior
      options.name = NULL;
      args.group = initializeThread(cb, &options);
      daemon = options.daemon ? JNI_TRUE : JNI_FALSE;
      needs_detach = options.detach ? JNI_TRUE : JNI_FALSE;
      args.name = options.name;
    }
    if (daemon) {
      attach_status = (*jvm)->AttachCurrentThreadAsDaemon(jvm, (void*)&env, &args);
    }
    else {
      attach_status = (*jvm)->AttachCurrentThread(jvm, (void *)&env, &args);
    }
    /* TLS is fetched after attach; may still be NULL on failure. */
    tls = get_thread_storage(env);
    if (tls) {
      snprintf(tls->name, sizeof(tls->name), "%s",
               args.name ? args.name : "<unconfigured native thread>");
      tls->needs_detach = needs_detach;
      tls->jvm_thread = JNI_FALSE;
    }
    // Dispose of allocated memory
    free(args.name);
    if (attach_status != JNI_OK) {
      fprintf(stderr, "JNA: Can't attach native thread to VM for callback: %d\n", attach_status);
      return;
    }
    if (args.group) {
      /* The initializer returned a weak global ref; release it now that
       * the attach no longer needs it. */
      (*env)->DeleteWeakGlobalRef(env, args.group);
    }
  }
  if (!tls) {
    /* Without TLS we cannot track detach state; bail out. */
    fprintf(stderr, "JNA: couldn't obtain thread-local storage\n");
    return;
  }
  // Give the callback glue its own local frame to ensure all local references
  // are properly disposed
  if ((*env)->PushLocalFrame(env, 16) < 0) {
    fprintf(stderr, "JNA: Out of memory: Can't allocate local frame\n");
  }
  else {
    invoke_callback(env, cb, cif, resp, cbargs);
    // Make note of whether the callback wants to avoid detach
    needs_detach = tls->needs_detach && !tls->jvm_thread;
    (*env)->PopLocalFrame(env, NULL);
  }
  if (needs_detach) {
    if ((*jvm)->DetachCurrentThread(jvm) != 0) {
      fprintf(stderr, "JNA: could not detach thread\n");
    }
  }
}
// Entry point for a worker thread of this service: perform the
// per-thread setup hook, then hand control to Boost.Asio's event loop
// in the io_service base class (blocks until the service runs out of
// work or is stopped).
void WIOService::run()
{
  initializeThread();               // per-thread initialization hook
  boost::asio::io_service::run();   // run the base-class event loop
}
/*
 * Drive the two-phase thread-reconstruction state machine starting from
 * an initial correspondence (start) and tangent hypothesis (tan):
 *   INITIALIZING -> seed the first pieces from the hypothesis;
 *   CONTINUING   -> extend the thread piece-by-piece until the end,
 *                   then transfer ownership of the pieces into
 *                   myThread.threadPiecesCurr.
 * Returns true when enough pieces were reconstructed to be usable
 * (more than NUM_THREAD_PIECES_FIRST_OPT), false otherwise.
 */
bool ThreadStereo::processHypothesesFromInit(corresponding_pts& start, tangent_and_score& tan)
{
  vector<ThreadPiece_Vision*> currPieces;
  // Upper bound on pieces, from total length over per-piece length.
  int num_pieces_max = (int)(myThread.total_length/myThread.length_thread_each_piece);
  //std::cout << "total length: " << myThread.total_length << std::endl;
  //std::cout << "length each piece: " << myThread.length_thread_each_piece << std::endl;
  std::cout << "num pieces max: " << num_pieces_max << std::endl;

  ThreadOptimizingModes mode = INITIALIZING;
  bool done = false;
  while (!done)
  {
    switch (mode)
    {
      case (INITIALIZING):
        // Seed currPieces from the hypothesis; failure ends the loop.
        if (initializeThread(start,tan,currPieces))
        {
          mode = CONTINUING;
        } else {
          done = true;
        }
        break;

      case (CONTINUING):
        continueThreadUntilEnd(currPieces, num_pieces_max);
        // Hand the pieces over to myThread; NULL out the local slots so
        // ownership is transferred, not shared.
        (myThread.threadPiecesCurr).resize(currPieces.size());
        for (int i=0; i < currPieces.size(); i++)
        {
          myThread.threadPiecesCurr[i] = currPieces[i];
          currPieces[i] = NULL;
        }
        done = true;
        break;

      /*
      case (INIT_OPPOSITE):
        ThreadPiece_Vision* firstPiece = currPieces[0];
        Matrix4d inv_trans;
        firstPiece->getTransformBefore(inv_trans);
        Matrix3d oldRot = inv_trans.corner(Eigen::TopLeft,3,3);
        inv_trans.corner(Eigen::TopLeft,3,3) = (Eigen::AngleAxisd(M_PI, inv_trans.block(0,1,3,1)))*oldRot;
        if (!continueThreadOpposite(oppositePieces, inv_trans, firstPiece->_curvature, firstPiece->_torsion, num_pieces_max))
        {
          done = true; //no pieces this way!
          break;
        }
        std::cout << "size opposite: " << oppositePieces.size() << std::endl;
        //now, we need to put the pieces together
        ThreadPiece_Vision* currPieceToAdd = oppositePieces.back();
        inv_trans = currPieceToAdd->_transform_after;
        oldRot = inv_trans.corner(Eigen::TopLeft,3,3);
        inv_trans.corner(Eigen::TopLeft,3,3) = (Eigen::AngleAxisd(M_PI, inv_trans.block(0,1,3,1)))*oldRot;
        toReturn.pieces.push_back(new ThreadPiece_Vision(currPieceToAdd->_curvature, currPieceToAdd->_torsion, currPieceToAdd->_length));
        toReturn.pieces.back()->setPrevTransform(inv_trans);
        toReturn.pieces.back()->_numPieces = 1;
        int pieceNum;
        for (pieceNum = 1; pieceNum < oppositePieces.size(); pieceNum++)
        {
          currPieceToAdd = oppositePieces[oppositePieces.size()-1-pieceNum];
          toReturn.pieces.push_back(new ThreadPiece_Vision(currPieceToAdd->_curvature, currPieceToAdd->_torsion, currPieceToAdd->_length, toReturn.pieces[pieceNum-1]));
        }
        for (pieceNum = 0; pieceNum < currPieces.size(); pieceNum++)
        {
          currPieceToAdd = currPieces[pieceNum];
          toReturn.pieces.push_back(new ThreadPiece_Vision(currPieceToAdd->_curvature, currPieceToAdd->_torsion, currPieceToAdd->_length, toReturn.pieces[toReturn.pieces.size()-1]));
        }
        done = true;
        break;
      */
    }
  }

  //resample, reoptimize
  //resampleAndReoptimize(toReturn.pieces, NUM_PIECES_AFTER_REOPTIMIZATION, toReturn.pieces_reopt);

  // NOTE(review): plain `delete` on orig_params_each_piece -- if this was
  // allocated with new[], it should be delete[]; confirm at the allocation site.
  delete opt_params_vision.orig_params_each_piece;

  if (myThread.threadPiecesCurr.size() > NUM_THREAD_PIECES_FIRST_OPT)
  {
    myThread.setNextPointers(myThread.threadPiecesCurr);
    return true;
  } else {
    return false;
  }
}
/*
 * ffi closure entry point: dispatch a native callback into the JVM.
 * Ensures the current native thread is attached to the VM (attaching it,
 * possibly as a daemon, when the callback carries an initializer that
 * configures attach options), invokes the Java-side callback inside its
 * own local reference frame, and finally detaches the thread unless the
 * callback asked -- via the last-error channel or attach options -- to
 * stay attached.
 */
static void callback_dispatch(ffi_cif* cif, void* resp, void** cbargs, void* user_data) {
  callback* cb = ((callback *)user_data);
  JavaVM* jvm = cb->vm;
  JNIEnv* env;
  /* GetEnv succeeds only if this native thread is already attached. */
  int was_attached = (*jvm)->GetEnv(jvm, (void *)&env, JNI_VERSION_1_4) == JNI_OK;
  /* Default: detach on exit only if we perform the attach ourselves. */
  jboolean detach = was_attached ? JNI_FALSE : JNI_TRUE;
  if (!was_attached) {
    int attach_status = 0;
    JavaVMAttachArgs args;
    int daemon = JNI_FALSE;
    args.version = JNI_VERSION_1_2;
    args.name = NULL;
    args.group = NULL;
    if (cb->behavior_flags & CB_HAS_INITIALIZER) {
      /* Let the callback's initializer choose thread name, group,
       * daemon status, and detach behavior for the attach. */
      AttachOptions options;
      options.daemon = JNI_FALSE; // default non-daemon
      options.detach = JNI_TRUE; // default detach behavior
      options.name = NULL;
      args.group = initializeThread(cb, &options);
      daemon = options.daemon ? JNI_TRUE : JNI_FALSE;
      detach = options.detach ? JNI_TRUE : JNI_FALSE;
      args.name = options.name;
    }
    if (daemon) {
      attach_status = (*jvm)->AttachCurrentThreadAsDaemon(jvm, (void*)&env, &args);
    }
    else {
      attach_status = (*jvm)->AttachCurrentThread(jvm, (void *)&env, &args);
    }
    // FIX: dispose of the name allocated by the initializer, as
    // dispatch_callback does (free(NULL) is a no-op) -- was leaked here.
    free(args.name);
    if (attach_status != JNI_OK) {
      fprintf(stderr, "JNA: Can't attach native thread to VM for callback: %d\n", attach_status);
      return;
    }
    if (args.group) {
      /* The initializer returned a weak global ref; release it now that
       * the attach no longer needs it. */
      (*env)->DeleteWeakGlobalRef(env, args.group);
    }
  }
  // Give the callback glue its own local frame to ensure all local references
  // are properly disposed
  if ((*env)->PushLocalFrame(env, 16) < 0) {
    // FIX: message was missing its trailing newline (cf. dispatch_callback)
    fprintf(stderr, "JNA: Out of memory: Can't allocate local frame\n");
  }
  else {
    // Kind of a hack, use last error value rather than setting up our own TLS
    setLastError(0);
    callback_invoke(env, cb, cif, resp, cbargs);
    // Must be invoked immediately after return to avoid anything
    // stepping on errno/GetLastError
    switch(lastError()) {
    case THREAD_LEAVE_ATTACHED: detach = JNI_FALSE; break;
    case THREAD_DETACH: detach = JNI_TRUE; break;
    default: break; /* use default detach behavior */
    }
    (*env)->PopLocalFrame(env, NULL);
  }
  if (detach) {
    // FIX: report detach failures instead of ignoring them,
    // consistent with dispatch_callback.
    if ((*jvm)->DetachCurrentThread(jvm) != 0) {
      fprintf(stderr, "JNA: could not detach thread\n");
    }
  }
}