/// Query the admin service for the registered chunk servers and print
/// each one's index, name, and port to stdout.
/// @param adminSP proxy to the admin service to query
void getChunkServers(AdminServicePrx& adminSP) {
    const Addresses servers = adminSP->getChunkServers();
    for (int idx = 0; idx < servers.size(); ++idx) {
        cout << "ChunkServer [" << idx << "]"
             << " NAME=" << servers[idx].name
             << " PORT=" << servers[idx].port
             << endl;
    }
}
int32_t MasterDB::cancelJob(int32_t jobId){ // cancel the job Job job = this->m_jobs.getJob(jobId); if (job.first == -1){ LOG4CPLUS_WARN(m_logger, "MasterDB::cancelJob() Job doesnot exist: " << jobId); return -1; } LOG4CPLUS_DEBUG(m_logger, "MasterDB::cancelJob() cancel job " << job); if (job.second.status == JS_INPROGRESS){ // set the job status as abandoned this->m_jobs.setJobStatus(jobId, JS_CANCELLED); // set all the idle tasks as abandoned to prevent them from being assigned this->m_tasks.setTasksCancelled(jobId); // release resources, add clean task Addresses workers = this->m_workers.getAllWorkers(); for (int i = 0; i < workers.size(); i++){ this->m_tasks.addCleanTask(jobId, workers[i]); } } // endif return 0; }
int32_t JobServiceImpl::create(const MapReduceJob& job, const Ice::Current&) { if (m_db->m_workers.isSafeMode()){ ///@todo throw SafeModeException LOG4CPLUS_DEBUG(m_logger, "JobServiceImpl::create() We are in safe mode"); return -1; } // we do not assign task when in safe mode WorkerManager::ToggleSafeMode safeMode(m_db->m_workers); // get chunks info of the input file(s) LocatedChunks allLocatedChunks; try{ vector<string> names; FilesInfo files; FileInfo fileinfo = m_db->m_tfs.getFileInfo(job.inputFile); if (fileinfo.flag & FileModeTypeDir){ // the input is a dir files = m_db->m_tfs.listDirectory(job.inputFile); string dir = job.inputFile; if (dir[dir.size() - 1] != '/') { dir += '/'; } for (int i = 0; i < files.size(); i++){ names.push_back(dir + files[i].name); } } else{ // the input is a file names.push_back(job.inputFile); } for (int i = 0; i < names.size(); i++){ // for each file in the dir LocatedChunks lchks = m_db->m_tfsFile.getChunksInfo(names[i]); allLocatedChunks.insert(allLocatedChunks.begin(), lchks.begin(), lchks.end()); LOG4CPLUS_DEBUG(m_logger, "Input file " << i+1 << " " << names[i] << " has " << lchks.size() << " chunks"); } LOG4CPLUS_DEBUG(m_logger, "Input file(s) " << job.inputFile << " totally has " << allLocatedChunks.size() << " chunks"); if (allLocatedChunks.empty()){ LOG4CPLUS_WARN(m_logger, "JobServiceImpl::create() TFS file or dir" << job.inputFile << " doesn't exist or has no chunks"); return -1; } }catch(std::exception &ex){ LOG4CPLUS_WARN(m_logger, "JobServiceImpl::create() catch " << ex.what()); return -1; } catch(...){ LOG4CPLUS_WARN(m_logger, "JobServiceImpl::create(): Unknown tfs exception!!!"); return -1; } // create output dir if necessary try{ if (!m_db->m_tfs.existDirectory(job.outputFile)){ m_db->m_tfs.createDirectory(job.outputFile); } } catch(std::exception &ex){ LOG4CPLUS_WARN(m_logger, "JobServiceImpl::create() catch " << ex.what()); return -1; } MapReduceJob jobSpec(job); // we assign each chunk a map task 
jobSpec.mapTaskNum = allLocatedChunks.size(); // the number of reduce tasks is less than the number of workers or too large if (jobSpec.reduceTaskNum < MasterConfig::MinReduceTaskNumPerJob){ jobSpec.reduceTaskNum = MasterConfig::MinReduceTaskNumPerJob; } else if (jobSpec.reduceTaskNum > MasterConfig::MaxReduceTaskNumPerJob){ jobSpec.reduceTaskNum = MasterConfig::MaxReduceTaskNumPerJob; } int32_t jobId = m_db->m_jobs.add(jobSpec, allLocatedChunks); LOG4CPLUS_INFO(m_logger, "Create job for " << jobSpec); // add map tasks vector<int32_t> mapTaskIds; try{ for (int i = 0; i < allLocatedChunks.size(); i++){ Address address; Addresses candidates; for (int j = 0; j < allLocatedChunks[i].locations.size(); j++){ address.name = allLocatedChunks[i].locations[j].name; address.port = allLocatedChunks[i].locations[j].port; candidates.push_back(address); } Address worker = m_db->m_workers.selectWorker(candidates); int32_t taskId = m_db->m_tasks.addMapTask(jobId, i, worker); mapTaskIds.push_back(taskId); } }catch (SafeModeException &ex){ LOG4CPLUS_WARN(m_logger, "JobServiceImpl::create() catch " << ex.what()); m_db->cancelJob(jobId); return -1; } // add reduce tasks Addresses workers = m_db->m_workers.getAllWorkers(); for (int i = 0; i < jobSpec.reduceTaskNum; i++){ LOG4CPLUS_DEBUG(m_logger, " reduceTask worker " << workers[i%workers.size()] << " "<< i << " "<< workers.size() ); int32_t reduceTaskId = m_db->m_tasks.addReduceTask(jobId, i, workers[i%workers.size()]); // add trans tasks for (int j = 0; j < mapTaskIds.size(); j++){ m_db->m_tasks.addTransTask(jobId, mapTaskIds[j], reduceTaskId); } } // @add by Chen Rishan // add job schedule infomation in workermanager // Tasks tasks = m_db->m_tasks.getAllTasks(jobId); // Job waitingJob = m_db->m_jobs.getJob(jobId); // JobPriority jobPrio = waitingJob.second.jobInitPriority; // m_db->m_workers.setJobPriority(tasks, jobPrio); LOG4CPLUS_INFO(m_logger, "Created job " << jobId); return jobId; }
int getLocalInterfaceList(InterfaceRefList& iflist, const bool onlyUp) { int sock, num = 0, ret = -1; #define REQ_BUF_SIZE (sizeof(struct ifreq) * 20) struct { struct ifconf ifc; char buf[REQ_BUF_SIZE]; } req = { { REQ_BUF_SIZE, { req.buf}}, { 0 } }; sock = socket(AF_INET, SOCK_DGRAM, 0); if (sock == INVALID_SOCKET) { HAGGLE_ERR("Could not open socket\n"); return -1; } ret = ioctl(sock, SIOCGIFCONF, &req); if (ret < 0) { HAGGLE_ERR("ioctl() failed\n"); return -1; } struct ifreq *ifr = (struct ifreq *) req.buf; int len = 0; for (; req.ifc.ifc_len != 0; ifr = (struct ifreq *) ((char *) ifr + len), req.ifc.ifc_len -= len) { Addresses addrs; unsigned char macaddr[6]; len = (sizeof(ifr->ifr_name) + max(sizeof(struct sockaddr), ifr->ifr_addr.sa_len)); if (ifr->ifr_addr.sa_family != AF_LINK // || strncmp(ifr->ifr_name, "en", 2) != 0 ) { continue; } struct sockaddr_dl *ifaddr = (struct sockaddr_dl *) &ifr->ifr_addr; // Type 6 seems to be Ethernet if (ifaddr->sdl_type != 6) { continue; } memcpy(macaddr, LLADDR(ifaddr), 6); addrs.add(new EthernetAddress(macaddr)); ifr->ifr_addr.sa_family = AF_INET; if (ioctl(sock, SIOCGIFADDR, ifr) != -1) { addrs.add(new IPv4Address(((struct sockaddr_in *) &ifr->ifr_addr)->sin_addr)); } if (ioctl(sock, SIOCGIFBRDADDR, ifr) != -1) { addrs.add(new IPv4BroadcastAddress(((struct sockaddr_in *) &ifr->ifr_broadaddr)->sin_addr)); } #if defined(ENABLE_IPv6) ifr->ifr_addr.sa_family = AF_INET6; if (ioctl(sock, SIOCGIFADDR, ifr) != -1) { addrs.add(new IPv6Address(((struct sockaddr_in6 *) &ifr->ifr_addr)->sin6_addr)); } if (ioctl(sock, SIOCGIFBRDADDR, ifr) != -1) { addrs.add(new IPv6BroadcastAddress(((struct sockaddr_in6 *) &ifr->ifr_broadaddr)->sin6_addr)); } #endif if (ioctl(sock, SIOCGIFFLAGS, ifr) == -1) { continue; } if (onlyUp && !(ifr->ifr_flags & IFF_UP)) continue; if (addrs.size() <= 1) { // No IPv4 or IPv6 addresses on interface --> ignore it continue; } // FIXME: separate 802.3 (wired) from 802.11 (wireless) ethernet 
iflist.push_back(InterfaceRef(Interface::create(Interface::TYPE_ETHERNET, macaddr, ifr->ifr_name, addrs, IFFLAG_UP | IFFLAG_LOCAL))); num++; } close(sock); return num; }