// Sets environment variable `key` to `value`; `redef` controls whether
// an existing value is overwritten.  Thin wrapper over POSIX setenv(3).
// Returns true on success, false on failure.
bool NSys::setEnv(const nstr& key, const nstr& value, bool redef){
  // setenv(3) returns 0 on success; collapse the old if/else to the
  // boolean expression directly.
  return setenv(key.c_str(), value.c_str(), redef) == 0;
}
void write(const nstr& in){ if(ifd_ < 0){ NERROR("command was not started with input mode"); } int n = ::write(ifd_, in.c_str(), in.length()); if(n < 0){ NERROR("error while writing"); } }
// Looks up environment variable `key`.  On success stores its value in
// `value` and returns true; returns false when the variable is unset.
bool NSys::getEnv(const nstr& key, nstr& value){
  if(const char* found = getenv(key.c_str())){
    value = found;
    return true;
  }
  return false;
}
bool NSys::exists(const nstr& path){ // TODO - there is a better way to do this? ifstream f(path.c_str()); if(f.fail()){ return false; } else{ return true; } }
// Compiles `pattern` into the underlying std::regex.
// On non-Apple platforms the "[^]" (match-anything) construct is first
// rewritten to "." — presumably because the platform regex engine
// rejects "[^]" (TODO confirm against the target libstdc++).
// Raises via NERROR when the pattern is not a valid regex.
// NOTE: `flags` is accepted but not used here — kept for interface
// compatibility.
NRegex_(NRegex* o, const nstr& pattern, uint32_t flags)
: o_(o){
  try{
#ifdef __APPLE__
    regex_ = new regex(pattern.c_str());
#else
    nstr normPattern = pattern;
    normPattern.findReplace("[^]", ".");
    regex_ = new regex(normPattern.c_str());
#endif
  }
  // Fix: catch by const reference (was a non-const ref with an unused
  // binding); surface a uniform error regardless of exception type.
  catch(const std::exception&){
    NERROR("invalid pattern: " + pattern);
  }
}
bool NSys::dirFiles(const nstr& dirPath, nvec& files){ DIR* dir = opendir(dirPath.c_str()); if(!dir){ return false; } dirent* de; while((de = readdir(dir))){ nstr p = de->d_name; if(p != "." && p != ".."){ files.push_back(de->d_name); } } closedir(dir); return true; }
// Reads the entire file at `path` and returns its contents.
// Returns "" when the file cannot be opened or its size cannot be
// determined.  Binary-safe: opened in "rb" mode, exact byte counts
// appended.
// Fixes: unchecked ftell() (-1 on error would have been passed to
// malloc), unchecked malloc(), and the fread() result being ignored
// (the old code appended `size` bytes even after a short read).
nstr NSys::fileToStr(const nstr& path){
  FILE* file = fopen(path.c_str(), "rb");
  if(!file){
    return "";
  }
  fseek(file, 0, SEEK_END);
  long size = ftell(file);
  if(size < 0){
    // ftell failed (e.g. path is not a seekable regular file)
    fclose(file);
    return "";
  }
  rewind(file);
  nstr ret;
  if(size > 0){
    char* buf = (char*)malloc(size);
    if(!buf){
      fclose(file);
      return "";
    }
    // Append only the bytes actually read, in case the file shrank
    // between ftell() and fread().
    size_t n = fread(buf, 1, size, file);
    ret.append(buf, n);
    free(buf);
  }
  fclose(file);
  return ret;
}
// Renames/moves `sourcePath` to `destPath` via rename(2).
// Returns true on success.
bool NSys::rename(const nstr& sourcePath, const nstr& destPath){
  return ::rename(sourcePath.c_str(), destPath.c_str()) == 0;
}
// Switches the process time zone: exports TZ=`zone` (overwriting any
// previous value) and calls tzset(3) so subsequent localtime-family
// calls pick up the new zone.
void NSys::setTimeZone(const nstr& zone){
  const int overwrite = 1;
  setenv("TZ", zone.c_str(), overwrite);
  tzset();
}
// Creates directory `path` with permissions rwxrwxr-x (0775).
// Returns true on success (mkdir(2) fails if the directory already
// exists or a parent component is missing).
bool NSys::makeDir(const nstr& path){
  const mode_t mode = S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH;
  return mkdir(path.c_str(), mode) == 0;
}
// Constructor: launches `command` in a child bash process, wiring up
// pipes for the child's stdin/stdout/stderr according to `mode`, a
// bitmask of NCommand::Input/Output/Error/OutputWithError.
//   o:       back-pointer to the owning NCommand facade
//   command: shell command line, run via /bin/bash -c
//   mode:    OutputWithError merges the child's stdout and stderr into
//            one pipe and may not be combined with Output or Error.
NCommand_(NCommand* o, const nstr& command, int mode)
: o_(o),
  command_(command),
  closeSignal_(15),   // default close signal: 15 == SIGTERM
  ifd_(-1),           // parent-side fd for child stdin  (-1 = unused)
  ofd_(-1),           // parent-side fd for child stdout (-1 = unused)
  efd_(-1),           // parent-side fd for child stderr (-1 = unused)
  mode_(mode){
  // Pipe fd pairs: [0] = read end, [1] = write end.
  int ip[2];
  int op[2];
  int ep[2];
  if(mode_ & NCommand::OutputWithError &&
     (mode_ & NCommand::Output || mode_ & NCommand::Error)){
    NERROR("OutputWithError mode cannot be combined with Output or "
           "Error modes");
  }
  // Create only the pipes the requested modes need.
  // NOTE(review): pipe(2) return values are unchecked — a failure here
  // would leave the fd arrays uninitialized for the dup2 calls below.
  if(mode_ & NCommand::Input){
    pipe(ip);
  }
  if(mode_ & NCommand::Output){
    pipe(op);
  }
  if(mode_ & NCommand::Error){
    pipe(ep);
  }
  if(mode_ & NCommand::OutputWithError){
    pipe(op);  // merged stdout+stderr share the `op` pipe
  }
  // Non-persistent commands are registered with the resource manager —
  // presumably so they are cleaned up automatically; verify against
  // the manager's semantics.
  if(!isPersistent()){
    _resourceManager->add(o_);
  }
  pid_ = fork();
  if(pid_ < 0){
    NERROR("failed to execute command: " + command);
  }
  if(pid_ == 0){
    // ----- child process: splice pipe ends onto fds 0/1/2 -----
    if(mode_ & NCommand::Input){
      ::close(ip[1]);       // child only reads its stdin
      dup2(ip[0], 0);
      ::close(ip[0]);
    }
    if(mode_ & NCommand::Output){
      ::close(op[0]);       // child only writes its stdout
      dup2(op[1], 1);
      ::close(op[1]);
    }
    if(mode_ & NCommand::Error){
      ::close(ep[0]);       // child only writes its stderr
      dup2(ep[1], 2);
      ::close(ep[1]);
    }
    if(mode_ & NCommand::OutputWithError){
      ::close(op[0]);
      dup2(op[1], 1);       // stdout and stderr both aimed at the
      dup2(op[1], 2);       // write end of the same pipe
      ::close(op[1]);
    }
    // Replace the child image with `bash -c <command>`.
    // NOTE(review): on exec failure this calls NERROR inside the forked
    // child; if NERROR throws or returns rather than _exit()-ing, the
    // child keeps running a copy of the parent's code — confirm NERROR
    // terminates the process.
    if(execl("/bin/bash", "bash", "-c", command.c_str(), NULL) < 0){
      NERROR("failed to execute command: " + command);
    }
  }
  else{
    // ----- parent process: keep the opposite pipe ends -----
    if(mode_ & NCommand::Input){
      ::close(ip[0]);
      ifd_ = ip[1];         // parent writes to the child's stdin
    }
    if(mode_ & NCommand::Output){
      ::close(op[1]);
      ofd_ = op[0];         // parent reads the child's stdout
      fcntl(ofd_, F_SETFL, O_NONBLOCK);  // non-blocking reads
    }
    if(mode_ & NCommand::Error){
      ::close(ep[1]);
      efd_ = ep[0];         // parent reads the child's stderr
      fcntl(efd_, F_SETFL, O_NONBLOCK);
    }
    if(mode_ & NCommand::OutputWithError){
      ::close(op[1]);
      ofd_ = op[0];         // one fd serves both output and error
      efd_ = op[0];
      fcntl(ofd_, F_SETFL, O_NONBLOCK);
    }
  }
}
// JIT-compiles feed-forward network `network` into a RunNetwork and
// registers it in networkMap_ under `name`; `threads` sizes each
// layer's work queue.  For every non-input layer an LLVM function is
// generated that computes one neuron's activation: load an input
// vector and a weight vector, multiply element-wise, horizontally sum
// the products, apply the neuron's activation, and store the scalar at
// output_index.  Per-neuron weight vectors are then copied into
// RunNeuron work items queued on the layer.  Always returns true.
bool compile(const nstr& name, NNet& network, size_t threads){
  RunNetwork* runNetwork = new RunNetwork;
  NNet::Layer* inputLayer = network.layer(0);
  size_t numLayers = network.numLayers();
  RunLayer* lastRunLayer = 0;
  // Layer 0 is the input layer; generate code for layers 1..numLayers-1.
  for(size_t l = 1; l < numLayers; ++l){
    RunLayer* runLayer = new RunLayer;
    runLayer->queue = new Queue(threads);
    size_t inputLayerSize = inputLayer->size();
    NNet::Layer* layer = network.layer(l);
    size_t layerSize = layer->size();
    // Chain layers: this layer reads the previous layer's output buffer.
    if(l > 1){
      runLayer->inputVecStart = lastRunLayer->outputVecStart;
      runLayer->inputVec = lastRunLayer->outputVec;
    }
    // Every layer except the last allocates its own output buffer —
    // presumably the final layer's buffer is supplied by the runner at
    // execution time (TODO confirm).
    if(l < numLayers - 1){
      double* outputVecPtrStart;
      double* outputVecPtr;
      allocVector(layerSize, &outputVecPtrStart, &outputVecPtr);
      runLayer->outputVecStart = outputVecPtrStart;
      runLayer->outputVec = outputVecPtr;
    }
    // Signature of the generated function:
    //   void f(<N x double>* inputVec, <N x double>* weightVec,
    //          double* outputVec, i32 outputIndex)
    TypeVec args;
    args.push_back(getPointer(doubleVecType(inputLayerSize)));
    args.push_back(getPointer(doubleVecType(inputLayerSize)));
    args.push_back(getPointer(doubleType()));
    args.push_back(int32Type());
    FunctionType* ft = FunctionType::get(voidType(), args, false);
    // NOTE(review): every layer's function is created with the same
    // `name`; relying on LLVM to uniquify symbols — confirm intended.
    Function* f = Function::Create(ft, Function::ExternalLinkage,
                                   name.c_str(), &module_);
    BasicBlock* entry = BasicBlock::Create(context_, "entry", f);
    builder_.SetInsertPoint(entry);
    // Bind and label the four formal arguments.
    auto aitr = f->arg_begin();
    Value* inputVecPtr = aitr;
    inputVecPtr->setName("input_vec_ptr");
    ++aitr;
    Value* weightVecPtr = aitr;
    weightVecPtr->setName("weight_vec_ptr");
    ++aitr;
    Value* outputVecPtr = aitr;
    outputVecPtr->setName("output_vec_ptr");
    ++aitr;
    Value* outputIndex = aitr;
    outputIndex->setName("output_index");
    // sum = horizontal add of (input .* weight), unrolled per lane.
    Value* inputVec = builder_.CreateLoad(inputVecPtr, "input_vec");
    Value* weightVec = builder_.CreateLoad(weightVecPtr, "weight_vec");
    Value* mulVec = builder_.CreateFMul(inputVec, weightVec, "mul_vec");
    Value* sumActivation =
      builder_.CreateExtractElement(mulVec, getInt32(0), "sum_elem");
    for(size_t i = 1; i < inputLayerSize; ++i){
      Value* elem =
        builder_.CreateExtractElement(mulVec, getInt32(i), "sum_elem");
      sumActivation =
        builder_.CreateFAdd(sumActivation, elem, "sum_activation");
    }
    // Activation is taken from neuron 0 — assumes all neurons in a
    // layer share the same activation function (TODO confirm).
    Value* output = getActivationOutput(layer->neuron(0), sumActivation);
    // outputVec[outputIndex] = activation(sum)
    Value* outputElement =
      builder_.CreateGEP(outputVecPtr, outputIndex, "out_elem");
    builder_.CreateStore(output, outputElement);
    builder_.CreateRetVoid();
    runLayer->f = f;
    // JIT the function and stash the raw entry point for the runner.
    runLayer->fp = (void (*)(void*, void*, void*, int))
      engine_->getPointerToFunction(f);
    // Build one RunNeuron per neuron, copying its incoming weights into
    // a flat vector ordered to match the input layer's neurons.
    for(size_t j = 0; j < layerSize; ++j){
      NNet::Neuron* nj = layer->neuron(j);
      RunNeuron* runNeuron = new RunNeuron;
      runNeuron->layer = runLayer;
      runNeuron->outputIndex = j;
      // (these double* locals shadow the LLVM Value* weightVecPtr /
      // outputVecPtr names above — legal but easy to misread)
      double* weightVecPtrStart;
      double* weightVecPtr;
      allocVector(inputLayerSize, &weightVecPtrStart, &weightVecPtr);
      runNeuron->weightVecStart = weightVecPtrStart;
      runNeuron->weightVec = weightVecPtr;
      for(size_t i = 0; i < inputLayerSize; ++i){
        NNet::Neuron* ni = inputLayer->neuron(i);
        weightVecPtr[i] = nj->weight(ni);
      }
      runLayer->queue->add(runNeuron);
    }
    runNetwork->layerVec.push_back(runLayer);
    inputLayer = layer;
    lastRunLayer = runLayer;
  }
  networkMap_.insert(make_pair(name, runNetwork));
  return true;
}