static inline void mon_read_debug(struct mon_msg *monmsg,
                                  struct mon_private *monpriv)
{
#ifdef MON_DEBUG
    u8 msg_type[2], mca_type;
    unsigned long records_len;

    records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;

    memcpy(msg_type, &monmsg->msg.class, 2);
    EBCASC(msg_type, 2);
    mca_type = mon_mca_type(monmsg, 0);
    EBCASC(&mca_type, 1);

    P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
            monpriv->read_index, monpriv->write_index);
    P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
            monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
    P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
            msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
            mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
    P_DEBUG("read, MCA: start = 0x%lX, end = 0x%lX\n",
            mon_mca_start(monmsg), mon_mca_end(monmsg));
    P_DEBUG("read, REC: start = 0x%X, end = 0x%X, len = %lu\n\n",
            mon_rec_start(monmsg), mon_rec_end(monmsg), records_len);

    if (mon_mca_size(monmsg) > 12)
        P_DEBUG("READ, MORE THAN ONE MCA\n\n");
#endif
}
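/*
 * All of the kernel-side snippets in this collection rely on a P_DEBUG macro
 * that compiles away outside of debug builds. Its exact definition belongs to
 * each driver (the s390 drivers typically route it through their own debug
 * facility), so the following is only a minimal printk-based sketch of the
 * idea; the MON_DEBUG guard and the "monreader: " prefix are assumptions for
 * illustration, not the driver's actual definition.
 */
#ifdef MON_DEBUG
#define P_DEBUG(fmt, ...) \
    printk(KERN_DEBUG "monreader: " fmt, ##__VA_ARGS__)
#else
#define P_DEBUG(fmt, ...) \
    do { } while (0)
#endif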
/******************************************************************************
 * IUCV handler                                                               *
 *****************************************************************************/
static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
    struct mon_private *monpriv = path->private;

    P_DEBUG("IUCV connection completed\n");
    P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
            "0x%02X, Sample = 0x%02X\n",
            ipuser[0], ipuser[1], ipuser[2]);
    atomic_set(&monpriv->iucv_connected, 1);
    wake_up(&mon_conn_wait_queue);
}
/*
 * appldata_net_init()
 *
 * init data, register ops
 */
static int __init appldata_net_init(void)
{
    int rc;

    P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data));

    rc = appldata_register_ops(&ops);
    if (rc != 0) {
        P_ERROR("Error registering ops, rc = %i\n", rc);
    } else {
        P_DEBUG("%s-ops registered!\n", ops.name);
    }
    return rc;
}
void
printAppOutput(pid_t pid, const char *channelName, const char *message, unsigned int size) {
    if (printAppOutputAsDebuggingMessages) {
        P_DEBUG("App " << pid << " " << channelName << ": " << StaticString(message, size));
    } else {
        char pidStr[sizeof("4294967295")];
        unsigned int pidStrLen, channelNameLen, totalLen;

        try {
            pidStrLen = integerToOtherBase<pid_t, 10>(pid, pidStr, sizeof(pidStr));
        } catch (const std::length_error &) {
            pidStr[0] = '?';
            pidStr[1] = '\0';
            pidStrLen = 1;
        }

        channelNameLen = strlen(channelName);
        totalLen = (sizeof("App X Y: \n") - 2) + pidStrLen + channelNameLen + size;
        if (totalLen < 1024) {
            char buf[1024];
            realPrintAppOutput(buf, sizeof(buf),
                pidStr, pidStrLen,
                channelName, channelNameLen,
                message, size);
        } else {
            DynamicBuffer buf(totalLen);
            realPrintAppOutput(buf.data, totalLen,
                pidStr, pidStrLen,
                channelName, channelNameLen,
                message, size);
        }
    }
}
void
Group::restart(const Options &options, RestartMethod method) {
    boost::container::vector<Callback> actions;

    assert(isAlive());
    P_DEBUG("Restarting group " << getName());

    // If there is currently a restarter thread or a spawner thread active,
    // the following tells them to abort their current work as soon as possible.
    restartsInitiated++;

    processesBeingSpawned = 0;
    m_spawning = false;
    m_restarting = true;
    uuid = generateUuid(pool);

    detachAll(actions);
    getPool()->interruptableThreads.create_thread(
        boost::bind(&Group::finalizeRestart, this, shared_from_this(),
            this->options.copyAndPersist().clearPerRequestFields(),
            options.copyAndPersist().clearPerRequestFields(),
            method, getContext()->getSpawningKitFactory(),
            restartsInitiated, actions),
        "Group restarter: " + getName(),
        POOL_HELPER_THREAD_STACK_SIZE
    );
}
static inline void mon_next_mca(struct mon_msg *monmsg)
{
    if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
        return;

    P_DEBUG("READ, NEXT MCA\n\n");
    monmsg->mca_offset += 12;
    monmsg->pos = 0;
}
static void
asyncCommitConfigChangeForAdminPanelConnectorDone(AdminPanelConnector::ConfigChangeRequest &_,
    ConfigChangeRequest *req)
{
    boost::lock_guard<boost::mutex> l(workingObjects->configSyncher);
    P_DEBUG("asyncCommitConfigChangeForAdminPanelConnectorDone: counter "
        << req->counter << " -> " << (req->counter - 1));
    asyncCommitConfigChangeCompletedOne(req);
}
void
Group::initiateOobw(const ProcessPtr &process) {
    assert(process->oobwStatus == Process::OOBW_REQUESTED);

    process->oobwStatus = Process::OOBW_IN_PROGRESS;

    if (process->enabled == Process::ENABLED
     || process->enabled == Process::DISABLING)
    {
        // We want the process to be disabled. However, disabling a process is potentially
        // asynchronous, so we pass a callback which will re-acquire the lock and call this
        // method again.
        P_DEBUG("Disabling process " << process->inspect() <<
            " in preparation for OOBW");
        DisableResult result = disable(process,
            boost::bind(&Group::lockAndMaybeInitiateOobw, this,
                _1, _2, shared_from_this()));
        switch (result) {
        case DR_SUCCESS:
            // Continue code flow.
            break;
        case DR_DEFERRED:
            // lockAndMaybeInitiateOobw() will eventually be called.
            return;
        case DR_ERROR:
        case DR_NOOP:
            P_DEBUG("Out-of-band work for process " << process->inspect() <<
                " aborted because the process could not be disabled");
            process->oobwStatus = Process::OOBW_NOT_ACTIVE;
            return;
        default:
            P_BUG("Unexpected disable() result " << result);
        }
    }

    assert(process->enabled == Process::DISABLED);
    assert(process->sessions == 0);

    P_DEBUG("Initiating OOBW request for process " << process->inspect());
    interruptableThreads.create_thread(
        boost::bind(&Group::spawnThreadOOBWRequest, this, shared_from_this(), process),
        "OOBW request thread for process " + process->inspect(),
        POOL_HELPER_THREAD_STACK_SIZE);
}
static void
asyncPrepareConfigChangeForAdminPanelConnectorDone(const vector<ConfigKit::Error> &errors,
    AdminPanelConnector::ConfigChangeRequest &_, ConfigChangeRequest *req)
{
    vector<ConfigKit::Error> translatedErrors =
        coreSchema->adminPanelConnector.translator.reverseTranslate(errors);

    boost::lock_guard<boost::mutex> l(workingObjects->configSyncher);
    P_DEBUG("asyncPrepareConfigChangeForAdminPanelConnectorDone: counter "
        << req->counter << " -> " << (req->counter - 1));
    req->errors.insert(req->errors.begin(), translatedErrors.begin(), translatedErrors.end());
    asyncPrepareConfigChangeCompletedOne(req);
}
static void
asyncCommitConfigChangeForApiServer(ConfigChangeRequest *req) {
    ApiWorkingObjects *awo = &workingObjects->apiWorkingObjects;

    awo->serverKitContext->commitConfigChange(req->forApiServerKit);
    awo->apiServer->commitConfigChange(req->forApiServer);

    boost::lock_guard<boost::mutex> l(workingObjects->configSyncher);
    P_DEBUG("asyncCommitConfigChangeForApiServer: counter "
        << req->counter << " -> " << (req->counter - 1));
    asyncCommitConfigChangeCompletedOne(req);
}
static void
asyncCommitConfigChangeForController(unsigned int i, ConfigChangeRequest *req) {
    ThreadWorkingObjects *two = &workingObjects->threadWorkingObjects[i];

    two->serverKitContext->commitConfigChange(*req->forControllerServerKit[i]);
    two->controller->commitConfigChange(*req->forController[i]);

    boost::lock_guard<boost::mutex> l(workingObjects->configSyncher);
    P_DEBUG("asyncCommitConfigChangeForController(" << i << "): counter "
        << req->counter << " -> " << (req->counter - 1));
    asyncCommitConfigChangeCompletedOne(req);
}
void
Group::onSessionInitiateFailure(const ProcessPtr &process, Session *session) {
    vector<Callback> actions;

    TRACE_POINT();
    // Standard resource management boilerplate stuff...
    PoolPtr pool = getPool();
    boost::unique_lock<boost::mutex> lock(pool->syncher);
    assert(process->isAlive());
    assert(isAlive() || getLifeStatus() == SHUTTING_DOWN);

    UPDATE_TRACE_POINT();
    P_DEBUG("Could not initiate a session with process " <<
        process->inspect() << ", detaching from pool if possible");
    if (!pool->detachProcessUnlocked(process, actions)) {
        P_DEBUG("Process was already detached");
    }
    pool->fullVerifyInvariants();
    lock.unlock();
    runAllActions(actions);
}
// The 'self' parameter is for keeping the current Group object alive
void
Group::lockAndMaybeInitiateOobw(const ProcessPtr &process, DisableResult result, GroupPtr self) {
    TRACE_POINT();

    // Standard resource management boilerplate stuff...
    PoolPtr pool = getPool();
    boost::unique_lock<boost::mutex> lock(pool->syncher);
    if (OXT_UNLIKELY(!process->isAlive() || !isAlive())) {
        return;
    }

    assert(process->oobwStatus == Process::OOBW_IN_PROGRESS);

    if (result == DR_SUCCESS) {
        if (process->enabled == Process::DISABLED) {
            P_DEBUG("Process " << process->inspect() << " disabled; proceeding "
                "with out-of-band work");
            process->oobwStatus = Process::OOBW_REQUESTED;
            if (shouldInitiateOobw(process)) {
                initiateOobw(process);
            } else {
                // We do not re-enable the process because it's likely that the
                // administrator has explicitly changed the state.
                P_DEBUG("Out-of-band work for process " << process->inspect() <<
                    " aborted because the process no longer requests out-of-band work");
                process->oobwStatus = Process::OOBW_NOT_ACTIVE;
            }
        } else {
            // We do not re-enable the process because it's likely that the
            // administrator has explicitly changed the state.
            P_DEBUG("Out-of-band work for process " << process->inspect() <<
                " aborted because the process was reenabled after disabling");
            process->oobwStatus = Process::OOBW_NOT_ACTIVE;
        }
    } else {
        P_DEBUG("Out-of-band work for process " << process->inspect() <<
            " aborted because the process could not be disabled");
        process->oobwStatus = Process::OOBW_NOT_ACTIVE;
    }
}
int Socket::Connect() {
    int ret = 0;

    if (host == NULL) {
        // Check before dereferencing: without a resolved host there is nothing to connect to.
        perror("Connect.host=NULL");
        return -1;
    }

    unsigned int **addrlist = (unsigned int **) host->h_addr_list;
    struct sockaddr_in *sap = (struct sockaddr_in *) &sa;

    for (int i = 0; host->h_addr_list[i]; i++) {
        in_addr sin_addr;
        bcopy(host->h_addr_list[i], &sin_addr, host->h_length);
        P_DEBUG("h_addr_list[%d] = %s\n", i, inet_ntoa(sin_addr));
    }

    if (sap->sin_addr.s_addr == 0xffffffff) {
        // No address has been picked yet: try each resolved address until one connects.
        while (*addrlist != NULL) {
            sap->sin_addr.s_addr = **addrlist;
            if (!(ret = connect(sock, (struct sockaddr *) &sa, sizeof(sa)))) {
                P_DEBUG("connect\n");
                break;
            }
            P_DEBUG("connect to this address failed, trying the next one\n");
            addrlist++;
        }
        if (*addrlist == NULL) {
            perror("Connect.noip");
        }
    } else {
        if ((ret = connect(sock, (struct sockaddr *) &sa, sizeof(sa))) != 0) {
            perror("Connect()");
        }
    }
    return ret;
}
static int __init test_init(void) /* module init function */
{
    int result = 0;

    /* 1. Allocate a device number */
    if (major) {
        devno = MKDEV(major, minor);
        result = register_chrdev_region(devno, 1, "test new driver");
    } else {
        result = alloc_chrdev_region(&devno, minor, 1, "test alloc driver");
        major = MAJOR(devno);
        minor = MINOR(devno);
    }
    if (result < 0) {
        P_DEBUG("register devno error!\n");
        goto err0;
    }
    printk("major[%d] minor[%d]\n", major, minor);

    /* 2. Register the char device */
    cdev_init(&test_cdev, &test_fops);
    test_cdev.owner = THIS_MODULE;
    result = cdev_add(&test_cdev, devno, 1);
    if (result < 0) {
        P_DEBUG("cdev_add error!\n");
        goto err1;
    }

    printk("hello kernel\n");
    return 0;

err1:
    unregister_chrdev_region(devno, 1);
err0:
    return result;
}
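/*
 * The exit path for this module is not part of the snippet above; the sketch
 * below is hypothetical (the name test_exit and the log text are assumed) and
 * only shows the conventional teardown in reverse order of test_init():
 * cdev_del() undoes cdev_add(), unregister_chrdev_region() releases the
 * device number.
 */
static void __exit test_exit(void)
{
    cdev_del(&test_cdev);               /* undo cdev_init()/cdev_add() */
    unregister_chrdev_region(devno, 1); /* release the allocated device number */
    printk("goodbye kernel\n");
}

module_init(test_init);
module_exit(test_exit);
MODULE_LICENSE("GPL");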
/**
 * The `immediately` parameter only has effect if the detached processes checker
 * thread is active. It means that, if the thread is currently sleeping, it should
 * wake up immediately and perform work.
 */
void
Group::startCheckingDetachedProcesses(bool immediately) {
    if (!detachedProcessesCheckerActive) {
        P_DEBUG("Starting detached processes checker");
        getPool()->nonInterruptableThreads.create_thread(
            boost::bind(&Group::detachedProcessesCheckerMain, this, shared_from_this()),
            "Detached processes checker: " + name,
            POOL_HELPER_THREAD_STACK_SIZE
        );
        detachedProcessesCheckerActive = true;
    } else if (detachedProcessesCheckerActive && immediately) {
        detachedProcessesCheckerCond.notify_all();
    }
}
ssize_t test_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
    const char data[] = "test_data";
    int ret;

    /* Never copy more than the kernel-side buffer actually holds. */
    if (count > sizeof(data))
        count = sizeof(data);

    if (copy_to_user(buf, data, count)) {
        ret = -EFAULT;
    } else {
        ret = count;
        P_DEBUG("copied [%s] to user space\n", data);
    }
    return ret; /* number of bytes actually read, or a negative errno */
}
static void mon_iucv_message_pending(struct iucv_path *path,
                                     struct iucv_message *msg)
{
    struct mon_private *monpriv = path->private;

    P_DEBUG("IUCV message pending\n");
    memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
           msg, sizeof(*msg));
    if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
        P_WARNING("IUCV message pending, message limit (%i) reached\n",
                  MON_MSGLIM);
        monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
    }
    monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
    atomic_inc(&monpriv->read_ready);
    wake_up_interruptible(&mon_read_wait_queue);
}
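/*
 * mon_iucv_message_pending() is the producer half of a small ring buffer: it
 * stores the message at write_index, advances the index modulo MON_MSGLIM,
 * bumps read_ready and wakes any sleeping reader. The consumer half is not
 * part of this collection; the sketch below only illustrates how such a
 * reader is typically written (the helper name mon_next_message and the
 * omission of any locking are assumptions, not the driver's actual read path).
 */
static struct mon_msg *mon_next_message(struct mon_private *monpriv)
{
    struct mon_msg *monmsg;

    /* Sleep until the producer has queued at least one message. */
    if (wait_event_interruptible(mon_read_wait_queue,
                                 atomic_read(&monpriv->read_ready)))
        return ERR_PTR(-ERESTARTSYS); /* interrupted by a signal */

    monmsg = monpriv->msg_array[monpriv->read_index];
    monpriv->read_index = (monpriv->read_index + 1) % MON_MSGLIM;
    atomic_dec(&monpriv->read_ready);
    return monmsg;
}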
static int mon_check_mca(struct mon_msg *monmsg)
{
    if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
        (mon_rec_start(monmsg) < mon_dcss_start) ||
        (mon_rec_end(monmsg) > mon_dcss_end) ||
        (mon_mca_type(monmsg, 0) == 0) ||
        (mon_mca_size(monmsg) % 12 != 0) ||
        (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
        (mon_mca_end(monmsg) > mon_dcss_end) ||
        (mon_mca_start(monmsg) < mon_dcss_start) ||
        ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0))) {
        P_DEBUG("READ, IGNORED INVALID MCA\n\n");
        return -EINVAL;
    }
    return 0;
}
ssize_t test_write(struct file *filp, const char __user *buf, size_t count, loff_t *offset)
{
    char kbuf[20];
    int ret;

    /* Reject writes that would overflow the kernel buffer (keep room for '\0'). */
    if (count >= sizeof(kbuf))
        return -EINVAL;

    if (copy_from_user(kbuf, buf, count)) {
        ret = -EFAULT;
    } else {
        kbuf[count] = '\0'; /* make the buffer printable as a string */
        ret = count;
        P_DEBUG("kbuf is [%s]\n", kbuf);
    }
    return ret; /* number of bytes actually written, or a negative errno */
}
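/*
 * A quick way to exercise test_read() and test_write() is a small user-space
 * program against the device node. The node name /dev/test (created manually
 * with mknod, or by udev) is an assumption for illustration; substitute the
 * name your setup actually uses.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
    char buf[20] = {0};
    int fd = open("/dev/test", O_RDWR);

    if (fd < 0) {
        perror("open /dev/test");
        return 1;
    }
    if (write(fd, "hello", 5) < 0)          /* exercises test_write() */
        perror("write");
    if (read(fd, buf, sizeof(buf) - 1) < 0) /* exercises test_read() */
        perror("read");
    else
        printf("read back: [%s]\n", buf);

    close(fd);
    return 0;
}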
static void
asyncPrepareConfigChangeForApiServer(const Json::Value &updates, ConfigChangeRequest *req) {
    vector<ConfigKit::Error> errors1, errors2;

    ConfigKit::prepareConfigChangeForSubComponent(
        *workingObjects->apiWorkingObjects.serverKitContext,
        coreSchema->apiServerKit.translator,
        req->config->inspectEffectiveValues(),
        errors1, req->forApiServerKit);
    ConfigKit::prepareConfigChangeForSubComponent(
        *workingObjects->apiWorkingObjects.apiServer,
        coreSchema->apiServer.translator,
        req->config->inspectEffectiveValues(),
        errors2, req->forApiServer);

    boost::lock_guard<boost::mutex> l(workingObjects->configSyncher);
    P_DEBUG("asyncPrepareConfigChangeForApiServer: counter "
        << req->counter << " -> " << (req->counter - 1));
    req->errors.insert(req->errors.begin(), errors1.begin(), errors1.end());
    req->errors.insert(req->errors.begin(), errors2.begin(), errors2.end());
    asyncPrepareConfigChangeCompletedOne(req);
}
int test_close(struct inode *node, struct file *filp)
{
    P_DEBUG("close device\n");
    return 0;
}
// The 'self' parameter is for keeping the current Group object alive while this thread is running.
void
Group::spawnThreadOOBWRequest(GroupPtr self, ProcessPtr process) {
    TRACE_POINT();
    this_thread::disable_interruption di;
    this_thread::disable_syscall_interruption dsi;

    Socket *socket;
    Connection connection;
    PoolPtr pool = getPool();
    Pool::DebugSupportPtr debug = pool->debugSupport;

    UPDATE_TRACE_POINT();
    P_DEBUG("Performing OOBW request for process " << process->inspect());
    if (debug != NULL && debug->oobw) {
        debug->debugger->send("OOBW request about to start");
        debug->messages->recv("Proceed with OOBW request");
    }

    UPDATE_TRACE_POINT();
    {
        // Standard resource management boilerplate stuff...
        boost::unique_lock<boost::mutex> lock(pool->syncher);
        if (OXT_UNLIKELY(!process->isAlive()
            || process->enabled == Process::DETACHED
            || !isAlive()))
        {
            return;
        }

        if (process->enabled != Process::DISABLED) {
            UPDATE_TRACE_POINT();
            P_INFO("Out-of-Band Work canceled: process " << process->inspect() <<
                " was concurrently re-enabled.");
            if (debug != NULL && debug->oobw) {
                debug->debugger->send("OOBW request canceled");
            }
            return;
        }

        assert(process->oobwStatus == Process::OOBW_IN_PROGRESS);
        assert(process->sessions == 0);
        socket = process->sessionSockets.top();
        assert(socket != NULL);
    }

    UPDATE_TRACE_POINT();
    unsigned long long timeout = 1000 * 1000 * 60; // 1 min
    try {
        this_thread::restore_interruption ri(di);
        this_thread::restore_syscall_interruption rsi(dsi);

        // Grab a connection. The connection is marked as fail in order to
        // ensure it is closed / recycled after this request (otherwise we'd
        // need to completely read the response).
        connection = socket->checkoutConnection();
        connection.fail = true;
        ScopeGuard guard(boost::bind(&Socket::checkinConnection, socket, connection));

        // This is copied from RequestHandler when it is sending data using the
        // "session" protocol.
        char sizeField[sizeof(uint32_t)];
        SmallVector<StaticString, 10> data;

        data.push_back(StaticString(sizeField, sizeof(uint32_t)));
        data.push_back(makeStaticStringWithNull("REQUEST_METHOD"));
        data.push_back(makeStaticStringWithNull("OOBW"));

        data.push_back(makeStaticStringWithNull("PASSENGER_CONNECT_PASSWORD"));
        data.push_back(makeStaticStringWithNull(process->connectPassword));

        uint32_t dataSize = 0;
        for (unsigned int i = 1; i < data.size(); i++) {
            dataSize += (uint32_t) data[i].size();
        }
        Uint32Message::generate(sizeField, dataSize);

        gatheredWrite(connection.fd, &data[0], data.size(), &timeout);

        // We do not care what the actual response is ... just wait for it.
        UPDATE_TRACE_POINT();
        waitUntilReadable(connection.fd, &timeout);
    } catch (const SystemException &e) {
        P_ERROR("*** ERROR: " << e.what() << "\n" << e.backtrace());
    } catch (const TimeoutException &e) {
        P_ERROR("*** ERROR: " << e.what() << "\n" << e.backtrace());
    }

    UPDATE_TRACE_POINT();
    vector<Callback> actions;
    {
        // Standard resource management boilerplate stuff...
        PoolPtr pool = getPool();
        boost::unique_lock<boost::mutex> lock(pool->syncher);
        if (OXT_UNLIKELY(!process->isAlive() || !isAlive())) {
            return;
        }

        process->oobwStatus = Process::OOBW_NOT_ACTIVE;
        if (process->enabled == Process::DISABLED) {
            enable(process, actions);
            assignSessionsToGetWaiters(actions);
        }

        pool->fullVerifyInvariants();

        initiateNextOobwRequest();
    }
    UPDATE_TRACE_POINT();
    runAllActions(actions);
    actions.clear();

    UPDATE_TRACE_POINT();
    P_DEBUG("Finished OOBW request for process " << process->inspect());
    if (debug != NULL && debug->oobw) {
        debug->debugger->send("OOBW request finished");
    }
}
void
Group::spawnThreadRealMain(const SpawnerPtr &spawner, const Options &options, unsigned int restartsInitiated) {
    TRACE_POINT();
    this_thread::disable_interruption di;
    this_thread::disable_syscall_interruption dsi;

    PoolPtr pool = getPool();
    Pool::DebugSupportPtr debug = pool->debugSupport;

    bool done = false;
    while (!done) {
        bool shouldFail = false;
        if (debug != NULL && debug->spawning) {
            UPDATE_TRACE_POINT();
            this_thread::restore_interruption ri(di);
            this_thread::restore_syscall_interruption rsi(dsi);
            this_thread::interruption_point();
            string iteration;
            {
                LockGuard g(debug->syncher);
                debug->spawnLoopIteration++;
                iteration = toString(debug->spawnLoopIteration);
            }
            P_DEBUG("Begin spawn loop iteration " << iteration);
            debug->debugger->send("Begin spawn loop iteration " + iteration);

            vector<string> cases;
            cases.push_back("Proceed with spawn loop iteration " + iteration);
            cases.push_back("Fail spawn loop iteration " + iteration);
            MessagePtr message = debug->messages->recvAny(cases);
            shouldFail = message->name == "Fail spawn loop iteration " + iteration;
        }

        ProcessPtr process;
        ExceptionPtr exception;
        try {
            UPDATE_TRACE_POINT();
            this_thread::restore_interruption ri(di);
            this_thread::restore_syscall_interruption rsi(dsi);
            if (shouldFail) {
                throw SpawnException("Simulated failure");
            } else {
                process = spawner->spawn(options);
                process->setGroup(shared_from_this());
            }
        } catch (const thread_interrupted &) {
            break;
        } catch (const tracable_exception &e) {
            exception = copyException(e);
            // Let other (unexpected) exceptions crash the program so
            // gdb can generate a backtrace.
        }

        UPDATE_TRACE_POINT();
        ScopeGuard guard(boost::bind(Process::forceTriggerShutdownAndCleanup, process));
        boost::unique_lock<boost::mutex> lock(pool->syncher);

        if (!isAlive()) {
            if (process != NULL) {
                P_DEBUG("Group is being shut down so dropping process " <<
                    process->inspect() << " which we just spawned and exiting spawn loop");
            } else {
                P_DEBUG("The group is being shut down. A process failed "
                    "to be spawned anyway, so ignoring this error and exiting "
                    "spawn loop");
            }
            // We stop immediately because any previously assumed invariants
            // may have been violated.
            break;
        } else if (restartsInitiated != this->restartsInitiated) {
            if (process != NULL) {
                P_DEBUG("A restart was issued for the group, so dropping process " <<
                    process->inspect() << " which we just spawned and exiting spawn loop");
            } else {
                P_DEBUG("A restart was issued for the group. A process failed "
                    "to be spawned anyway, so ignoring this error and exiting "
                    "spawn loop");
            }
            // We stop immediately because any previously assumed invariants
            // may have been violated.
            break;
        }

        verifyInvariants();
        assert(m_spawning);
        assert(processesBeingSpawned > 0);

        processesBeingSpawned--;
        assert(processesBeingSpawned == 0);

        UPDATE_TRACE_POINT();
        vector<Callback> actions;
        if (process != NULL) {
            AttachResult result = attach(process, actions);
            if (result == AR_OK) {
                guard.clear();
                if (getWaitlist.empty()) {
                    pool->assignSessionsToGetWaiters(actions);
                } else {
                    assignSessionsToGetWaiters(actions);
                }
                P_DEBUG("New process count = " << enabledCount <<
                    ", remaining get waiters = " << getWaitlist.size());
            } else {
                done = true;
                P_DEBUG("Unable to attach spawned process " << process->inspect());
                if (result == AR_ANOTHER_GROUP_IS_WAITING_FOR_CAPACITY) {
                    pool->possiblySpawnMoreProcessesForExistingGroups();
                }
            }
        } else {
            // TODO: sure this is the best thing? if there are
            // processes currently alive we should just use them.
            P_ERROR("Could not spawn process for group " << name <<
                ": " << exception->what() << "\n" <<
                exception->backtrace());
            if (enabledCount == 0) {
                enableAllDisablingProcesses(actions);
            }
            Pool::assignExceptionToGetWaiters(getWaitlist, exception, actions);
            pool->assignSessionsToGetWaiters(actions);
            done = true;
        }

        done = done
            || (processLowerLimitsSatisfied() && getWaitlist.empty())
            || processUpperLimitsReached()
            || pool->atFullCapacity(false);
        m_spawning = !done;
        if (done) {
            P_DEBUG("Spawn loop done");
        } else {
            processesBeingSpawned++;
            P_DEBUG("Continue spawning");
        }

        UPDATE_TRACE_POINT();
        pool->fullVerifyInvariants();
        lock.unlock();
        UPDATE_TRACE_POINT();
        runAllActions(actions);
        UPDATE_TRACE_POINT();
    }

    if (debug != NULL && debug->spawning) {
        debug->debugger->send("Spawn loop done");
    }
}
void
Group::onSessionClose(const ProcessPtr &process, Session *session) {
    TRACE_POINT();

    // Standard resource management boilerplate stuff...
    PoolPtr pool = getPool();
    boost::unique_lock<boost::mutex> lock(pool->syncher);
    assert(process->isAlive());
    assert(isAlive() || getLifeStatus() == SHUTTING_DOWN);

    P_TRACE(2, "Session closed for process " << process->inspect());
    verifyInvariants();
    UPDATE_TRACE_POINT();

    /* Update statistics. */
    process->sessionClosed(session);
    assert(process->getLifeStatus() == Process::ALIVE);
    assert(process->enabled == Process::ENABLED
        || process->enabled == Process::DISABLING
        || process->enabled == Process::DETACHED);
    if (process->enabled == Process::ENABLED) {
        pqueue.decrease(process->pqHandle, process->busyness());
    }

    /* This group now has a process that's guaranteed to be not
     * totally busy.
     */
    assert(!process->isTotallyBusy());

    bool detachingBecauseOfMaxRequests = false;
    bool detachingBecauseCapacityNeeded = false;
    bool shouldDetach =
        ( detachingBecauseOfMaxRequests = (
            options.maxRequests > 0
            && process->processed >= options.maxRequests
        )) || (
        detachingBecauseCapacityNeeded = (
            process->sessions == 0
            && getWaitlist.empty()
            && (
                !pool->getWaitlist.empty()
                || anotherGroupIsWaitingForCapacity()
            )
        ));
    bool shouldDisable =
        process->enabled == Process::DISABLING
        && process->sessions == 0
        && enabledCount > 0;

    if (shouldDetach || shouldDisable) {
        vector<Callback> actions;

        if (shouldDetach) {
            if (detachingBecauseCapacityNeeded) {
                /* Someone might be trying to get() a session for a different
                 * group that couldn't be spawned because of lack of pool capacity.
                 * If this group isn't under sufficient load (as apparent from the
                 * checked conditions) then now's a good time to detach
                 * this process or group in order to free capacity.
                 */
                P_DEBUG("Process " << process->inspect() << " is no longer totally "
                    "busy; detaching it in order to make room in the pool");
            } else {
                /* This process has processed its maximum number of requests,
                 * so we detach it.
                 */
                P_DEBUG("Process " << process->inspect() <<
                    " has reached its maximum number of requests (" <<
                    options.maxRequests << "); detaching it");
            }
            pool->detachProcessUnlocked(process, actions);
        } else {
            removeProcessFromList(process, disablingProcesses);
            addProcessToList(process, disabledProcesses);
            removeFromDisableWaitlist(process, DR_SUCCESS, actions);
            maybeInitiateOobw(process);
        }

        pool->fullVerifyInvariants();
        lock.unlock();
        runAllActions(actions);
    } else {
        // This could change process->enabled.
        maybeInitiateOobw(process);

        if (!getWaitlist.empty() && process->enabled == Process::ENABLED) {
            /* If there are clients on this group waiting for a process to
             * become available then call them now.
             */
            UPDATE_TRACE_POINT();
            // Already calls verifyInvariants().
            assignSessionsToGetWaitersQuickly(lock);
        }
    }
}
static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)
{
    P_DEBUG("--- NET - RECORD ---\n");
    P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces);
    P_DEBUG("rx_packets = %8lu\n", net_data->rx_packets);
    P_DEBUG("tx_packets = %8lu\n", net_data->tx_packets);
    P_DEBUG("rx_bytes = %8lu\n", net_data->rx_bytes);
    P_DEBUG("tx_bytes = %8lu\n", net_data->tx_bytes);
    P_DEBUG("rx_errors = %8lu\n", net_data->rx_errors);
    P_DEBUG("tx_errors = %8lu\n", net_data->tx_errors);
    P_DEBUG("rx_dropped = %8lu\n", net_data->rx_dropped);
    P_DEBUG("tx_dropped = %8lu\n", net_data->tx_dropped);
    P_DEBUG("collisions = %8lu\n", net_data->collisions);
    P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1);
    P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2);
    P_DEBUG("timestamp = %lX\n", net_data->timestamp);
}
void
Group::detachedProcessesCheckerMain(GroupPtr self) {
    TRACE_POINT();

    PoolPtr pool = getPool();

    Pool::DebugSupportPtr debug = pool->debugSupport;
    if (debug != NULL && debug->detachedProcessesChecker) {
        debug->debugger->send("About to start detached processes checker");
        debug->messages->recv("Proceed with starting detached processes checker");
    }

    boost::unique_lock<boost::mutex> lock(pool->syncher);
    while (true) {
        assert(detachedProcessesCheckerActive);

        if (getLifeStatus() == SHUT_DOWN || this_thread::interruption_requested()) {
            UPDATE_TRACE_POINT();
            P_DEBUG("Stopping detached processes checker");
            detachedProcessesCheckerActive = false;
            break;
        }

        UPDATE_TRACE_POINT();
        if (!detachedProcesses.empty()) {
            P_TRACE(2, "Checking whether any of the " << detachedProcesses.size() <<
                " detached processes have exited...");
            ProcessList::iterator it = detachedProcesses.begin();
            ProcessList::iterator end = detachedProcesses.end();
            while (it != end) {
                const ProcessPtr process = *it;
                switch (process->getLifeStatus()) {
                case Process::ALIVE:
                    if (process->canTriggerShutdown()) {
                        P_DEBUG("Detached process " << process->inspect() <<
                            " has 0 active sessions now. Triggering shutdown.");
                        process->triggerShutdown();
                        assert(process->getLifeStatus() == Process::SHUTDOWN_TRIGGERED);
                    }
                    it++;
                    break;
                case Process::SHUTDOWN_TRIGGERED:
                    if (process->canCleanup()) {
                        P_DEBUG("Detached process " << process->inspect() <<
                            " has shut down. Cleaning up associated resources.");
                        process->cleanup();
                        assert(process->getLifeStatus() == Process::DEAD);
                        it++;
                        removeProcessFromList(process, detachedProcesses);
                    } else if (process->shutdownTimeoutExpired()) {
                        P_WARN("Detached process " << process->inspect() <<
                            " didn't shut down within " PROCESS_SHUTDOWN_TIMEOUT_DISPLAY
                            ". Forcefully killing it with SIGKILL.");
                        kill(process->pid, SIGKILL);
                        it++;
                    } else {
                        it++;
                    }
                    break;
                default:
                    P_BUG("Unknown 'lifeStatus' state " << (int) process->getLifeStatus());
                }
            }
        }

        UPDATE_TRACE_POINT();
        if (detachedProcesses.empty()) {
            UPDATE_TRACE_POINT();
            P_DEBUG("Stopping detached processes checker");
            detachedProcessesCheckerActive = false;

            vector<Callback> actions;
            if (shutdownCanFinish()) {
                UPDATE_TRACE_POINT();
                finishShutdown(actions);
            }

            verifyInvariants();
            verifyExpensiveInvariants();
            lock.unlock();
            UPDATE_TRACE_POINT();
            runAllActions(actions);
            break;
        } else {
            UPDATE_TRACE_POINT();
            verifyInvariants();
            verifyExpensiveInvariants();
        }

        // Not all processes can be shut down yet. Sleep for a while unless
        // someone wakes us up.
        UPDATE_TRACE_POINT();
        detachedProcessesCheckerCond.timed_wait(lock,
            posix_time::milliseconds(100));
    }
}
VariantMap
initializeAgent(int argc, char *argv[], const char *processName) {
    VariantMap options;
    const char *seedStr;

    seedStr = getenv("PASSENGER_RANDOM_SEED");
    if (seedStr == NULL || *seedStr == '\0') {
        randomSeed = (unsigned int) time(NULL);
    } else {
        randomSeed = (unsigned int) atoll(seedStr);
    }
    srand(randomSeed);
    srandom(randomSeed);

    ignoreSigpipe();
    if (hasEnvOption("PASSENGER_ABORT_HANDLER", true)) {
        shouldDumpWithCrashWatch = hasEnvOption("PASSENGER_DUMP_WITH_CRASH_WATCH", true);
        beepOnAbort = hasEnvOption("PASSENGER_BEEP_ON_ABORT", false);
        stopOnAbort = hasEnvOption("PASSENGER_STOP_ON_ABORT", false);
        IGNORE_SYSCALL_RESULT(pipe(emergencyPipe1));
        IGNORE_SYSCALL_RESULT(pipe(emergencyPipe2));
        installAbortHandler();
    }
    oxt::initialize();
    setup_syscall_interruption_support();
    if (getenv("PASSENGER_SIMULATE_SYSCALL_FAILURES")) {
        initializeSyscallFailureSimulation(processName);
    }
    setvbuf(stdout, NULL, _IONBF, 0);
    setvbuf(stderr, NULL, _IONBF, 0);

    TRACE_POINT();
    try {
        if (argc == 1) {
            int e;

            switch (fdIsSocket(FEEDBACK_FD)) {
            case FISR_YES:
                _feedbackFdAvailable = true;
                options.readFrom(FEEDBACK_FD);
                if (options.getBool("fire_and_forget", false)) {
                    _feedbackFdAvailable = false;
                    close(FEEDBACK_FD);
                }
                break;
            case FISR_NO:
                fprintf(stderr,
                    "You're not supposed to start this program from the command line. "
                    "It's used internally by Phusion Passenger.\n");
                exit(1);
                break;
            case FISR_ERROR:
                e = errno;
                fprintf(stderr,
                    "Encountered an error in feedback file descriptor 3: %s (%d)\n",
                    strerror(e), e);
                exit(1);
                break;
            }
        } else {
            options.readFrom((const char **) argv + 1, argc - 1);
        }

        #ifdef __linux__
            if (options.has("passenger_root")) {
                ResourceLocator locator(options.get("passenger_root", true));
                string ruby = options.get("default_ruby", false, DEFAULT_RUBY);
                string path = ruby + " \"" + locator.getHelperScriptsDir() +
                    "/backtrace-sanitizer.rb\"";
                backtraceSanitizerCommand = strdup(path.c_str());
            }
        #endif
        if (backtraceSanitizerCommand == NULL) {
            backtraceSanitizerCommand = "c++filt -n";
            backtraceSanitizerPassProgramInfo = false;
        }

        options.setDefaultInt("log_level", DEFAULT_LOG_LEVEL);
        setLogLevel(options.getInt("log_level"));
        if (!options.get("debug_log_file", false).empty()) {
            if (strcmp(processName, "PassengerWatchdog") == 0) {
                /* Have the watchdog set STDOUT and STDERR to the debug
                 * log file so that system abort() calls that stuff
                 * are properly logged.
                 */
                string filename = options.get("debug_log_file");
                options.erase("debug_log_file");

                int fd = open(filename.c_str(), O_CREAT | O_WRONLY | O_APPEND, 0644);
                if (fd == -1) {
                    int e = errno;
                    throw FileSystemException("Cannot open debug log file " +
                        filename, e, filename);
                }

                dup2(fd, STDOUT_FILENO);
                dup2(fd, STDERR_FILENO);
                close(fd);
            } else {
                setDebugFile(options.get("debug_log_file").c_str());
            }
        }
    } catch (const tracable_exception &e) {
        P_ERROR("*** ERROR: " << e.what() << "\n" << e.backtrace());
        exit(1);
    }

    // Change process title.
    argv0 = strdup(argv[0]);
    strncpy(argv[0], processName, strlen(argv[0]));
    for (int i = 1; i < argc; i++) {
        memset(argv[i], '\0', strlen(argv[i]));
    }

    P_DEBUG("Random seed: " << randomSeed);

    return options;
}
// The 'self' parameter is for keeping the current Group object alive while this thread is running.
void
Group::finalizeRestart(GroupPtr self, Options options, RestartMethod method,
    SpawnerFactoryPtr spawnerFactory, unsigned int restartsInitiated,
    vector<Callback> postLockActions)
{
    TRACE_POINT();

    Pool::runAllActions(postLockActions);
    postLockActions.clear();

    this_thread::disable_interruption di;
    this_thread::disable_syscall_interruption dsi;

    // Create a new spawner.
    SpawnerPtr newSpawner = spawnerFactory->create(options);
    SpawnerPtr oldSpawner;

    UPDATE_TRACE_POINT();
    PoolPtr pool = getPool();

    Pool::DebugSupportPtr debug = pool->debugSupport;
    if (debug != NULL && debug->restarting) {
        this_thread::restore_interruption ri(di);
        this_thread::restore_syscall_interruption rsi(dsi);
        this_thread::interruption_point();
        debug->debugger->send("About to end restarting");
        debug->messages->recv("Finish restarting");
    }

    ScopedLock l(pool->syncher);
    if (!isAlive()) {
        P_DEBUG("Group " << name << " is shutting down, so aborting restart");
        return;
    }
    if (restartsInitiated != this->restartsInitiated) {
        // Before this restart could be finalized, another restart command was given.
        // The spawner we just created might be out of date now so we abort.
        P_DEBUG("Restart of group " << name <<
            " aborted because a new restart was initiated concurrently");
        if (debug != NULL && debug->restarting) {
            debug->debugger->send("Restarting aborted");
        }
        return;
    }

    // Run some sanity checks.
    pool->fullVerifyInvariants();
    assert(m_restarting);
    UPDATE_TRACE_POINT();

    // Atomically swap the new spawner with the old one.
    resetOptions(options);
    oldSpawner = spawner;
    spawner = newSpawner;

    m_restarting = false;
    if (shouldSpawn()) {
        spawn();
    } else if (isWaitingForCapacity()) {
        P_INFO("Group " << name << " is waiting for capacity to become available. "
            "Trying to shutdown another idle process to free capacity...");
        if (pool->forceFreeCapacity(this, postLockActions) != NULL) {
            spawn();
        } else {
            P_INFO("There are no processes right now that are eligible "
                "for shutdown. Will try again later.");
        }
    }
    verifyInvariants();

    l.unlock();
    oldSpawner.reset();
    Pool::runAllActions(postLockActions);
    P_DEBUG("Restart of group " << name << " done");
    if (debug != NULL && debug->restarting) {
        debug->debugger->send("Restarting done");
    }
}
/*
 * appldata_net_exit()
 *
 * unregister ops
 */
static void __exit appldata_net_exit(void)
{
    appldata_unregister_ops(&ops);
    P_DEBUG("%s-ops unregistered!\n", ops.name);
}