bool DCStartd::_suspendClaim( ) { setCmdStr( "suspendClaim" ); if( ! checkClaimId() ) { return false; } if( ! checkAddr() ) { return false; } // if this claim is associated with a security session ClaimIdParser cidp(claim_id); char const *sec_session = cidp.secSessionId(); if (IsDebugLevel(D_COMMAND)) { int cmd = SUSPEND_CLAIM; dprintf (D_COMMAND, "DCStartd::_suspendClaim(%s,...) making connection to %s\n", getCommandStringSafe(cmd), _addr ? _addr : "NULL"); } bool result; ReliSock reli_sock; reli_sock.timeout(20); // years of research... :) if( ! reli_sock.connect(_addr) ) { std::string err = "DCStartd::_suspendClaim: "; err += "Failed to connect to startd ("; err += _addr ? _addr : "NULL"; err += ')'; newError( CA_CONNECT_FAILED, err.c_str() ); return false; } int cmd = SUSPEND_CLAIM; result = startCommand( cmd, (Sock*)&reli_sock, 20, NULL, NULL, false, sec_session ); if( ! result ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::_suspendClaim: Failed to send command " ); return false; } // Now, send the ClaimId if( ! reli_sock.put_secret(claim_id) ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::_suspendClaim: Failed to send ClaimId to the startd" ); return false; } if( ! reli_sock.end_of_message() ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::_suspendClaim: Failed to send EOM to the startd" ); return false; } return true; }
char const *
DCMsg::name()
{
		// Lazily resolve and cache the human-readable name of m_cmd;
		// subsequent calls return the cached string.
	if( !m_cmd_str ) {
		m_cmd_str = getCommandStringSafe( m_cmd );
	}
	return m_cmd_str;
}
bool DCStarter::createJobOwnerSecSession(int timeout,char const *job_claim_id,char const *starter_sec_session,char const *session_info,MyString &owner_claim_id,MyString &error_msg,MyString &starter_version,MyString &starter_addr) { ReliSock sock; if (IsDebugLevel(D_COMMAND)) { dprintf (D_COMMAND, "DCStarter::createJobOwnerSecSession(%s,...) making connection to %s\n", getCommandStringSafe(CREATE_JOB_OWNER_SEC_SESSION), _addr ? _addr : "NULL"); } if( !connectSock(&sock, timeout, NULL) ) { error_msg = "Failed to connect to starter"; return false; } if( !startCommand(CREATE_JOB_OWNER_SEC_SESSION, &sock,timeout,NULL,NULL,false,starter_sec_session) ) { error_msg = "Failed to send CREATE_JOB_OWNER_SEC_SESSION to starter"; return false; } ClassAd input; input.Assign(ATTR_CLAIM_ID,job_claim_id); input.Assign(ATTR_SESSION_INFO,session_info); sock.encode(); if( !putClassAd(&sock, input) || !sock.end_of_message() ) { error_msg = "Failed to compose CREATE_JOB_OWNER_SEC_SESSION to starter"; return false; } sock.decode(); ClassAd reply; if( !getClassAd(&sock, reply) || !sock.end_of_message() ) { error_msg = "Failed to get response to CREATE_JOB_OWNER_SEC_SESSION from starter"; return false; } bool success = false; reply.LookupBool(ATTR_RESULT,success); if( !success ) { reply.LookupString(ATTR_ERROR_STRING,error_msg); return false; } reply.LookupString(ATTR_CLAIM_ID,owner_claim_id); reply.LookupString(ATTR_VERSION,starter_version); // get the full starter address from the starter in case it contains // extra CCB info that we don't already know about reply.LookupString(ATTR_STARTER_IP_ADDR,starter_addr); return true; }
bool DCStartd::checkpointJob( const char* name_ckpt ) { dprintf( D_FULLDEBUG, "Entering DCStartd::checkpointJob(%s)\n", name_ckpt ); setCmdStr( "checkpointJob" ); if (IsDebugLevel(D_COMMAND)) { int cmd = PCKPT_JOB; dprintf (D_COMMAND, "DCStartd::checkpointJob(%s,...) making connection to %s\n", getCommandStringSafe(cmd), _addr ? _addr : "NULL"); } bool result; ReliSock reli_sock; reli_sock.timeout(20); // years of research... :) if( ! reli_sock.connect(_addr) ) { std::string err = "DCStartd::checkpointJob: "; err += "Failed to connect to startd ("; err += _addr ? _addr : "NULL"; err += ')'; newError( CA_CONNECT_FAILED, err.c_str() ); return false; } int cmd = PCKPT_JOB; result = startCommand( cmd, (Sock*)&reli_sock ); if( ! result ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::checkpointJob: Failed to send command PCKPT_JOB to the startd" ); return false; } // Now, send the name if( ! reli_sock.put(name_ckpt) ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::checkpointJob: Failed to send Name to the startd" ); return false; } if( ! reli_sock.end_of_message() ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::checkpointJob: Failed to send EOM to the startd" ); return false; } // we're done dprintf( D_FULLDEBUG, "DCStartd::checkpointJob: " "successfully sent command\n" ); return true; }
bool DCStartd::vacateClaim( const char* name_vacate ) { setCmdStr( "vacateClaim" ); if (IsDebugLevel(D_COMMAND)) { int cmd = VACATE_CLAIM; dprintf (D_COMMAND, "DCStartd::vacateClaim(%s,...) making connection to %s\n", getCommandStringSafe(cmd), _addr ? _addr : "NULL"); } bool result; ReliSock reli_sock; reli_sock.timeout(20); // years of research... :) if( ! reli_sock.connect(_addr) ) { std::string err = "DCStartd::vacateClaim: "; err += "Failed to connect to startd ("; err += _addr ? _addr : "NULL"; err += ')'; newError( CA_CONNECT_FAILED, err.c_str() ); return false; } int cmd = VACATE_CLAIM; result = startCommand( cmd, (Sock*)&reli_sock ); if( ! result ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::vacateClaim: Failed to send command PCKPT_JOB to the startd" ); return false; } if( ! reli_sock.put(name_vacate) ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::vacateClaim: Failed to send Name to the startd" ); return false; } if( ! reli_sock.end_of_message() ) { newError( CA_COMMUNICATION_ERROR, "DCStartd::vacateClaim: Failed to send EOM to the startd" ); return false; } return true; }
// Release the job currently running on this claim, either gracefully
// (DEACTIVATE_CLAIM) or forcibly (DEACTIVATE_CLAIM_FORCIBLY).  If the
// startd sends back a response ad, *claim_is_closing is set to true when
// the startd reports START == false (it will not run further jobs on this
// claim).  A missing response ad is tolerated (pre-7.0.5 startds).
// Returns false, recording the reason via newError(), on any failure.
bool
DCStartd::deactivateClaim( bool graceful, bool *claim_is_closing )
{
	dprintf( D_FULLDEBUG, "Entering DCStartd::deactivateClaim(%s)\n",
			 graceful ? "graceful" : "forceful" );

		// Default the out-param so callers see "not closing" on early return.
	if( claim_is_closing ) {
		*claim_is_closing = false;
	}

	setCmdStr( "deactivateClaim" );
	if( ! checkClaimId() ) { return false; }
	if( ! checkAddr() ) { return false; }

		// if this claim is associated with a security session
	ClaimIdParser cidp(claim_id);
	char const *sec_session = cidp.secSessionId();

	if (IsDebugLevel(D_COMMAND)) {
		int cmd = graceful ? DEACTIVATE_CLAIM : DEACTIVATE_CLAIM_FORCIBLY;
		dprintf (D_COMMAND, "DCStartd::deactivateClaim(%s,...) making connection to %s\n",
				 getCommandStringSafe(cmd), _addr ? _addr : "NULL");
	}

	bool result;
	ReliSock reli_sock;
	reli_sock.timeout(20); // years of research... :)
	if( ! reli_sock.connect(_addr) ) {
		std::string err = "DCStartd::deactivateClaim: ";
		err += "Failed to connect to startd (";
		err += _addr ? _addr : "NULL";
		err += ')';
		newError( CA_CONNECT_FAILED, err.c_str() );
		return false;
	}

		// Pick the command variant based on the graceful flag.
	int cmd;
	if( graceful ) {
		cmd = DEACTIVATE_CLAIM;
	} else {
		cmd = DEACTIVATE_CLAIM_FORCIBLY;
	}
	result = startCommand( cmd, (Sock*)&reli_sock, 20, NULL, NULL, false, sec_session );
	if( ! result ) {
		std::string err = "DCStartd::deactivateClaim: ";
		err += "Failed to send command ";
		if( graceful ) {
			err += "DEACTIVATE_CLAIM";
		} else {
			err += "DEACTIVATE_CLAIM_FORCIBLY";
		}
		err += " to the startd";
		newError( CA_COMMUNICATION_ERROR, err.c_str() );
		return false;
	}

		// Now, send the ClaimId
	if( ! reli_sock.put_secret(claim_id) ) {
		newError( CA_COMMUNICATION_ERROR,
				  "DCStartd::deactivateClaim: Failed to send ClaimId to the startd" );
		return false;
	}
	if( ! reli_sock.end_of_message() ) {
		newError( CA_COMMUNICATION_ERROR,
				  "DCStartd::deactivateClaim: Failed to send EOM to the startd" );
		return false;
	}

		// Try to read the startd's response ad.
	reli_sock.decode();
	ClassAd response_ad;
	if( !getClassAd(&reli_sock, response_ad) || !reli_sock.end_of_message() ) {
		dprintf( D_FULLDEBUG, "DCStartd::deactivateClaim: failed to read response ad.\n");
			// The response ad is not critical and is expected to be missing
			// if the startd is from before 7.0.5.
	}
	else {
			// START defaults to true, so a response ad without START
			// means the claim stays open.
		bool start = true;
		response_ad.LookupBool(ATTR_START,start);
		if( claim_is_closing ) {
			*claim_is_closing = !start;
		}
	}

		// we're done
	dprintf( D_FULLDEBUG, "DCStartd::deactivateClaim: "
			 "successfully sent command\n" );
	return true;
}
// Asynchronously deliver msg to this messenger's peer daemon.
// Fails the message immediately (via callMessageSendFailed) if it was
// already canceled or its delivery deadline has passed.  If daemonCore
// currently has too many registered sockets, delivery is retried one
// second later.  Otherwise an existing connected socket is reused when
// available, or a non-blocking connect to the daemon is started, and the
// command is then issued non-blocking with connectCallback as the
// completion handler.
void DCMessenger::startCommand( classy_counted_ptr<DCMsg> msg )
{
	MyString error;
	msg->setMessenger( this );

	if( msg->deliveryStatus() == DCMsg::DELIVERY_CANCELED ) {
		msg->callMessageSendFailed( this );
		return;
	}

		// A deadline of 0 means "no deadline"; otherwise fail now if it
		// has already expired.
	time_t deadline = msg->getDeadline();
	if( deadline && deadline < time(NULL) ) {
		msg->addError(CEDAR_ERR_DEADLINE_EXPIRED,
					  "deadline for delivery of this message expired");
		msg->callMessageSendFailed( this );
		return;
	}

		// For a UDP message, we may need to register two sockets, one for
		// the SafeSock and another for a ReliSock to establish the
		// security session.
	Stream::stream_type st = msg->getStreamType();
	if( daemonCore->TooManyRegisteredSockets(-1,&error,st==Stream::safe_sock?2:1) ) {
			// Try again in a sec
			// Eventually, it would be better to queue this centrally
			// (i.e. in DaemonCore) rather than having an independent
			// timer for each case.  Then it would be possible to control
			// priority of different messages etc.
		dprintf(D_FULLDEBUG, "Delaying delivery of %s to %s, because %s\n",
				msg->name(),peerDescription(),error.Value());
		startCommandAfterDelay( 1, msg );
		return;
	}

		// Currently, there may be only one pending operation per messenger.
	ASSERT(!m_callback_msg.get());
	ASSERT(!m_callback_sock);
	ASSERT(m_pending_operation == NOTHING_PENDING);

	m_pending_operation = START_COMMAND_PENDING;
	m_callback_msg = msg;
	m_callback_sock = m_sock.get();
	if( !m_callback_sock ) {
			// No cached socket: start a fresh non-blocking connection.
		if (IsDebugLevel(D_COMMAND)) {
			const char * addr = m_daemon->addr();
			const int cmd = msg->m_cmd;
			dprintf (D_COMMAND, "DCMessenger::startCommand(%s,...) making non-blocking connection to %s\n",
					 getCommandStringSafe(cmd), addr ? addr : "NULL");
		}
		const bool nonblocking = true;
		m_callback_sock = m_daemon->makeConnectedSocket(st,msg->getTimeout(),msg->getDeadline(),&msg->m_errstack,nonblocking);
		if( !m_callback_sock ) {
			msg->callMessageSendFailed( this );
			return;
		}
	}

		// Bump the refcount while the non-blocking operation is in flight
		// (presumably released on the connectCallback path -- confirm).
	incRefCount();

	m_daemon->startCommand_nonblocking (
		msg->m_cmd,
		m_callback_sock,
		msg->getTimeout(),
		&msg->m_errstack,
		&DCMessenger::connectCallback,
		this,
		msg->name(),
		msg->getRawProtocol(),
		msg->getSecSessionId());
}
// Fetch ("peek" at) portions of the job's stdout/stderr and other named
// files from the starter without a full file transfer.  Builds a request
// ad listing which streams/files and starting offsets we want, sends
// STARTER_PEEK, then receives each file's data (up to max_bytes total)
// via get_file() into file descriptors supplied by the PeekGetFD
// callback.  The in/out offset arguments are advanced by the bytes
// received.  Returns true if every requested file transferred; otherwise
// sets error_msg (and retry_sensible, when the starter's reply says so)
// and returns false.
bool
DCStarter::peek(bool transfer_stdout, ssize_t &stdout_offset, bool transfer_stderr, ssize_t &stderr_offset, const std::vector<std::string> &filenames, std::vector<ssize_t> &offsets, size_t max_bytes, bool &retry_sensible, PeekGetFD &next, std::string &error_msg, unsigned timeout, const std::string &sec_session_id, DCTransferQueue *xfer_q)
{
		// Build the request ad: which streams we want and the offsets to
		// start reading from.
	compat_classad::ClassAd ad;
	ad.InsertAttr(ATTR_JOB_OUTPUT, transfer_stdout);
	ad.InsertAttr("OutOffset", stdout_offset);
	ad.InsertAttr(ATTR_JOB_ERROR, transfer_stderr);
	ad.InsertAttr("ErrOffset", stderr_offset);
	ad.InsertAttr(ATTR_VERSION, CondorVersion());

		// Count how many files we expect back, so a partial transfer can
		// be detected at the end.
	size_t total_files = 0;
	total_files += transfer_stdout ? 1 : 0;
	total_files += transfer_stderr ? 1 : 0;

	if (filenames.size()) {
		total_files += filenames.size();
			// Encode the extra file names and their offsets as two
			// parallel ClassAd lists: TransferFiles / TransferOffsets.
		std::vector<classad::ExprTree *> filelist;
		filelist.reserve(filenames.size());
		std::vector<classad::ExprTree *> offsetlist;
		offsetlist.reserve(filenames.size());
		std::vector<ssize_t>::const_iterator it2 = offsets.begin();
		for (std::vector<std::string>::const_iterator it = filenames.begin();
			 it != filenames.end() && it2 != offsets.end();
			 it++, it2++)
		{
			classad::Value value;
			value.SetStringValue(*it);
			filelist.push_back(classad::Literal::MakeLiteral(value));
			value.SetIntegerValue(*it2);
			offsetlist.push_back(classad::Literal::MakeLiteral(value));
		}
		classad::ExprTree *list(classad::ExprList::MakeExprList(filelist));
		ad.Insert("TransferFiles", list);
		list = classad::ExprList::MakeExprList(offsetlist);
		ad.Insert("TransferOffsets", list);
	}
	ad.InsertAttr(ATTR_MAX_TRANSFER_BYTES, static_cast<long long>(max_bytes));

	ReliSock sock;

	if (IsDebugLevel(D_COMMAND)) {
		dprintf (D_COMMAND, "DCStarter::peek(%s,...) making connection to %s\n",
				 getCommandStringSafe(STARTER_PEEK), _addr ? _addr : "NULL");
	}

	if( !connectSock(&sock, timeout, NULL) ) {
		error_msg = "Failed to connect to starter";
		return false;
	}

		// NOTE(review): message text says START_PEEK but the command sent
		// is STARTER_PEEK -- the text looks stale; confirm before relying
		// on it in log scraping.
	if( !startCommand(STARTER_PEEK, &sock, timeout, NULL, NULL, false, sec_session_id.c_str()) ) {
		error_msg = "Failed to send START_PEEK to starter";
		return false;
	}

	sock.encode();
	if (!putClassAd(&sock, ad) || !sock.end_of_message()) {
		error_msg = "Failed to send request to starter";
		return false;
	}

	compat_classad::ClassAd response;
	sock.decode();
	if (!getClassAd(&sock, response) || !sock.end_of_message()) {
		error_msg = "Failed to read response for peeking at logs.";
		return false;
	}
	dPrintAd(D_FULLDEBUG, response);

	bool success = false;
	if (!response.EvaluateAttrBool(ATTR_RESULT, success) || !success) {
			// Starter rejected the request; pass along its reason and
			// whether it thinks a retry might succeed.
		response.EvaluateAttrBool(ATTR_RETRY, retry_sensible);
		error_msg = "Remote operation failed.";
		response.EvaluateAttrString(ATTR_ERROR_STRING, error_msg);
		return false;
	}

		// The starter echoes back the list of files it is about to send,
		// with their starting offsets, as two parallel lists.
	classad::Value valueX;
	classad_shared_ptr<classad::ExprList> list;
	if (!response.EvaluateAttr("TransferFiles", valueX) || !valueX.IsSListValue(list)) {
		error_msg = "Unable to evaluate starter response";
		return false;
	}
	classad_shared_ptr<classad::ExprList> offlist;
	if (!response.EvaluateAttr("TransferOffsets", valueX) || !valueX.IsSListValue(offlist)) {
		error_msg = "Unable to evaluate starter response (missing offsets)";
		return false;
	}

	size_t remaining = max_bytes;
	size_t file_count = 0;
	classad::ExprList::const_iterator it2 = offlist->begin();
	for (classad::ExprList::const_iterator it = list->begin();
		 it != list->end() && it2 != offlist->end();
		 it++, it2++)
	{
			// Each entry is either a file name (string) or an integer fd
			// tag: 0 = the job's stdout, 1 = the job's stderr.
		classad::Value value;
		(*it2)->Evaluate(value);
		off_t off = -1;
		value.IsIntegerValue(off);

		(*it)->Evaluate(value);
		std::string filename;
		int64_t xfer_fd = -1;
		if (!value.IsStringValue(filename) && value.IsIntegerValue(xfer_fd)) {
			if (xfer_fd == 0) filename = "_condor_stdout";
			if (xfer_fd == 1) filename = "_condor_stderr";
		}

			// Ask the caller where this file's bytes should be written.
		int fd = next.getNextFD(filename);
		filesize_t size = -1;
		int retval;
			// Receive the file data; hitting the max-bytes cap is
			// tolerated (GET_FILE_MAX_BYTES_EXCEEDED is not an error).
		if ((retval = sock.get_file(&size, fd, false, false, remaining, xfer_q)) && (retval != GET_FILE_MAX_BYTES_EXCEEDED)) {
			error_msg = "Internal error when transferring file " + filename;
		}
		else if (size >= 0) {
				// NOTE(review): this subtracts max_bytes, not size, so
				// 'remaining' hits 0 after the first file -- looks like it
				// should be 'remaining -= size'; confirm intent.
			remaining -= max_bytes;
			file_count++;
			off += size;
		}
		else {
			error_msg = "Failed to transfer file " + filename;
		}

			// Advance the caller's offset for whichever stream/file this was.
		if (xfer_fd == 0) {
			stdout_offset = off;
			//dprintf(D_FULLDEBUG, "New stdout offset: %ld\n", stdout_offset);
		}
		else if (xfer_fd == 1) {
			stderr_offset = off;
		}
		else {
			std::vector<ssize_t>::iterator it4 = offsets.begin();
			for (std::vector<std::string>::const_iterator it3 = filenames.begin();
				 it3 != filenames.end() && it4 != offsets.end();
				 it3++, it4++)
			{
				if (*it3 == filename) *it4 = off;
			}
		}
	}

		// Cross-check our count against the sender's.
	size_t remote_file_count;
	if (!sock.get(remote_file_count) || !sock.end_of_message()) {
		error_msg = "Unable to get remote file count.";
		return false;
	}
	if (file_count != remote_file_count) {
			// NOTE(review): %ld with size_t arguments -- %zu would be the
			// portable specifier; confirm before changing the string.
		formatstr(error_msg, "Received %ld files, but remote side thought it sent %ld files\n", file_count, remote_file_count);
		return false;
	}
	if ((total_files != file_count) && !error_msg.size()) {
		error_msg = "At least one file transfer failed.";
		return false;
	}
	return true;
}
// Ask the starter to launch sshd in the job's environment (ssh-to-job).
// Performs an ssh key exchange over sock: sends START_SSHD plus a request
// ad (preferred shells, slot name, ssh-keygen args), then receives the
// remote user name, a base64-encoded private client key (written to
// private_client_key_file) and the sshd's public server key (appended to
// known_hosts_file with a "*" host pattern).  On failure fills in
// error_msg; retry_is_sensible is taken from the starter's reply when the
// failure may be transient.  Compiled out when HAVE_SSH_TO_JOB is unset.
bool
DCStarter::startSSHD(char const *known_hosts_file,char const *private_client_key_file,char const *preferred_shells,char const *slot_name,char const *ssh_keygen_args,ReliSock &sock,int timeout,char const *sec_session_id,MyString &remote_user,MyString &error_msg,bool &retry_is_sensible)
{
	retry_is_sensible = false;

#ifndef HAVE_SSH_TO_JOB
	error_msg = "This version of Condor does not support ssh key exchange.";
	return false;
#else
	if (IsDebugLevel(D_COMMAND)) {
		dprintf (D_COMMAND, "DCStarter::startSSHD(%s,...) making connection to %s\n",
				 getCommandStringSafe(START_SSHD), _addr ? _addr : "NULL");
	}

	if( !connectSock(&sock, timeout, NULL) ) {
		error_msg = "Failed to connect to starter";
		return false;
	}

	if( !startCommand(START_SSHD, &sock,timeout,NULL,NULL,false,sec_session_id) ) {
		error_msg = "Failed to send START_SSHD to starter";
		return false;
	}

		// Build the request ad; all attributes are optional.
	ClassAd input;

	if( preferred_shells && *preferred_shells ) {
		input.Assign(ATTR_SHELL,preferred_shells);
	}

	if( slot_name && *slot_name ) {
			// This is a little silly.
			// We are telling the remote side the name of the slot so
			// that it can put it in the welcome message.
		input.Assign(ATTR_NAME,slot_name);
	}

	if( ssh_keygen_args && *ssh_keygen_args ) {
		input.Assign(ATTR_SSH_KEYGEN_ARGS,ssh_keygen_args);
	}

	sock.encode();
	if( !putClassAd(&sock, input) || !sock.end_of_message() ) {
		error_msg = "Failed to send START_SSHD request to starter";
		return false;
	}

	ClassAd result;
	sock.decode();
	if( !getClassAd(&sock, result) || !sock.end_of_message() ) {
		error_msg = "Failed to read response to START_SSHD from starter";
		return false;
	}

	bool success = false;
	result.LookupBool(ATTR_RESULT,success);
	if( !success ) {
			// Report the starter's error, prefixed with the slot name.
		std::string remote_error_msg;
		result.LookupString(ATTR_ERROR_STRING,remote_error_msg);
		error_msg.formatstr("%s: %s",slot_name,remote_error_msg.c_str());
		retry_is_sensible = false;
		result.LookupBool(ATTR_RETRY,retry_is_sensible);
		return false;
	}

	result.LookupString(ATTR_REMOTE_USER,remote_user);

	std::string public_server_key;
	if( !result.LookupString(ATTR_SSH_PUBLIC_SERVER_KEY,public_server_key) ) {
		error_msg = "No public ssh server key received in reply to START_SSHD";
		return false;
	}
	std::string private_client_key;
	if( !result.LookupString(ATTR_SSH_PRIVATE_CLIENT_KEY,private_client_key) ) {
		error_msg = "No ssh client key received in reply to START_SSHD";
		return false;
	}

		// store the private client key
		// (mode 0400 and fail-if-exists so an existing key file is never
		// clobbered or left readable by others)
	unsigned char *decode_buf = NULL;
	int length = -1;
	condor_base64_decode(private_client_key.c_str(),&decode_buf,&length);
	if( !decode_buf ) {
		error_msg = "Error decoding ssh client key.";
		return false;
	}
	FILE *fp = safe_fcreate_fail_if_exists(private_client_key_file,"a",0400);
	if( !fp ) {
		error_msg.formatstr("Failed to create %s: %s",
							private_client_key_file,strerror(errno));
		free( decode_buf );
		return false;
	}
	if( fwrite(decode_buf,length,1,fp)!=1 ) {
		error_msg.formatstr("Failed to write to %s: %s",
							private_client_key_file,strerror(errno));
		fclose( fp );
		free( decode_buf );
		return false;
	}
	if( fclose(fp)!=0 ) {
		error_msg.formatstr("Failed to close %s: %s",
							private_client_key_file,strerror(errno));
		free( decode_buf );
		return false;
	}
	fp = NULL;
	free( decode_buf );
	decode_buf = NULL;

		// store the public server key in the known_hosts file
	length = -1;
	condor_base64_decode(public_server_key.c_str(),&decode_buf,&length);
	if( !decode_buf ) {
		error_msg = "Error decoding ssh server key.";
		return false;
	}
	fp = safe_fcreate_fail_if_exists(known_hosts_file,"a",0600);
	if( !fp ) {
		error_msg.formatstr("Failed to create %s: %s",
							known_hosts_file,strerror(errno));
		free( decode_buf );
		return false;
	}

		// prepend a host name pattern (*) to the public key to make a valid
		// record in the known_hosts file
	fprintf(fp,"* ");

	if( fwrite(decode_buf,length,1,fp)!=1 ) {
		error_msg.formatstr("Failed to write to %s: %s",
							known_hosts_file,strerror(errno));
		fclose( fp );
		free( decode_buf );
		return false;
	}

	if( fclose(fp)!=0 ) {
		error_msg.formatstr("Failed to close %s: %s",
							known_hosts_file,strerror(errno));
		free( decode_buf );
		return false;
	}
	fp = NULL;
	free( decode_buf );
	decode_buf = NULL;

	return true;
#endif
}
// Ask the transfer queue manager for permission to start an upload
// (downloading=false) or download (downloading=true).  If no queueing
// applies (GoAheadAlways), succeeds immediately.  Otherwise opens a
// ReliSock to the manager, sends TRANSFER_QUEUE_REQUEST plus an ad
// describing the transfer, and leaves the request pending; the caller
// must then use PollForTransferQueueSlot() to learn the answer.
// Returns false (with error_desc filled in) if the request could not be
// sent.
bool
DCTransferQueue::RequestTransferQueueSlot(bool downloading,filesize_t sandbox_size,char const *fname,char const *jobid,char const *queue_user,int timeout,MyString &error_desc)
{
	ASSERT(fname);
	ASSERT(jobid);

	if( GoAheadAlways( downloading ) ) {
			// No queue manager to consult; just record what we are doing.
		m_xfer_downloading = downloading;
		m_xfer_fname = fname;
		m_xfer_jobid = jobid;
		return true;
	}
	CheckTransferQueueSlot();
	if( m_xfer_queue_sock ) {
			// A request has already been made.
			// Currently, this is a no-op, because any upload/download slot
			// is as good as any other.  In the future, there may be
			// different queues for different paths.
		ASSERT( m_xfer_downloading == downloading );
		m_xfer_fname = fname;
		m_xfer_jobid = jobid;
		return true;
	}

	time_t started = time(NULL);
	CondorError errstack;
		// Our caller has to finish this operation in the specified
		// amount of time or risk not responding to the file transfer
		// peer in time, so ignore the timeout multiplier and set the
		// timeout exactly as specified.
	m_xfer_queue_sock = reliSock( timeout, 0, &errstack, false, true );

	if( !m_xfer_queue_sock ) {
		formatstr(m_xfer_rejected_reason,
				"Failed to connect to transfer queue manager for job %s (%s): %s.",
				jobid, fname, errstack.getFullText().c_str() );
		error_desc = m_xfer_rejected_reason;
		dprintf(D_ALWAYS,"%s\n",m_xfer_rejected_reason.c_str());
		return false;
	}

	if( timeout ) {
			// Charge the connect time against the caller's budget, but
			// always leave at least one second for the command itself.
		timeout -= time(NULL)-started;
		if( timeout <= 0 ) {
			timeout = 1;
		}
	}

	if (IsDebugLevel(D_COMMAND)) {
		int cmd = TRANSFER_QUEUE_REQUEST;
		dprintf (D_COMMAND, "DCTransferQueue::RequestTransferQueueSlot(%s,...) making connection to %s\n",
				 getCommandStringSafe(cmd), _addr ? _addr : "NULL");
	}

	bool connected = startCommand(
		TRANSFER_QUEUE_REQUEST, m_xfer_queue_sock, timeout, &errstack );

	if( !connected ) {
			// Tear down the member socket so a later call can retry.
		delete m_xfer_queue_sock;
		m_xfer_queue_sock = NULL;
		formatstr(m_xfer_rejected_reason,
				"Failed to initiate transfer queue request for job %s (%s): %s.",
				jobid, fname, errstack.getFullText().c_str() );
		error_desc = m_xfer_rejected_reason;
		dprintf(D_ALWAYS,"%s\n",m_xfer_rejected_reason.c_str());
		return false;
	}

	m_xfer_downloading = downloading;
	m_xfer_fname = fname;
	m_xfer_jobid = jobid;

		// Describe the transfer we want a slot for.
	ClassAd msg;
	msg.Assign(ATTR_DOWNLOADING,downloading);
	msg.Assign(ATTR_FILE_NAME,fname);
	msg.Assign(ATTR_JOB_ID,jobid);
	msg.Assign(ATTR_USER,queue_user);
	msg.Assign(ATTR_SANDBOX_SIZE,sandbox_size);

	m_xfer_queue_sock->encode();

	if( !putClassAd(m_xfer_queue_sock, msg) || !m_xfer_queue_sock->end_of_message() )
	{
		formatstr(m_xfer_rejected_reason,
				"Failed to write transfer request to %s for job %s "
				"(initial file %s).",
				m_xfer_queue_sock->peer_description(),
				m_xfer_jobid.c_str(),
				m_xfer_fname.c_str());
		error_desc = m_xfer_rejected_reason;
		dprintf(D_ALWAYS,"%s\n",m_xfer_rejected_reason.c_str());
		return false;
	}

	m_xfer_queue_sock->decode();

		// Request has been initiated.  Now sender should call
		// PollForTransferQueueSlot() to get response.
	m_xfer_queue_pending = true;
	return true;
}