bool
ModuleGenerator::declareExport(UniqueChars fieldName, uint32_t funcIndex, uint32_t* exportIndex)
{
    if (!exportMap_->fieldNames.append(Move(fieldName)))
        return false;

    FuncIndexMap::AddPtr p = funcIndexToExport_.lookupForAdd(funcIndex);
    if (p) {
        if (exportIndex)
            *exportIndex = p->value();
        return exportMap_->fieldsToExports.append(p->value());
    }

    uint32_t newExportIndex = module_->exports.length();
    MOZ_ASSERT(newExportIndex < MaxExports);

    if (exportIndex)
        *exportIndex = newExportIndex;

    Sig copy;
    if (!copy.clone(funcSig(funcIndex)))
        return false;

    return module_->exports.append(Move(copy)) &&
           funcIndexToExport_.add(p, funcIndex, newExportIndex) &&
           exportMap_->fieldsToExports.append(newExportIndex) &&
           exportMap_->exportFuncIndices.append(funcIndex);
}
/*
 * Load train data (signatures and their respective names) from a directory.
 * Returns the number of signatures read via *n.
 * TODO: make this Windows compatible
 */
TrainData*
Database::loadTrainDataFromDir(string dir, int* n)
{
    Sig sig;
    vector<string> files;
    string fname;
    char buf[1024];
    unsigned int ii;
    TrainData* data;

    getFileList(dir, files);
    data = new TrainData[files.size()];

    getcwd(buf, sizeof(buf));   // save the current working directory
    chdir(dir.c_str());         // switch to the data directory

    for (ii = 0; ii < files.size(); ii++) {
        fname = files[ii];
        sig.load(fname);
        data[ii].name = filenameToName(files[ii]);            // derive the name from the sig filename
        data[ii].sigArr = sig.toArray();                      // convert the sig to an array
        data[ii].value = (double) tableLookup(data[ii].name); // value is the table index
    }

    chdir(buf);                 // restore the original working directory

    *n = ii;
    return data;
}
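/*
 * A minimal usage sketch for loadTrainDataFromDir(), assuming a Database
 * instance and a directory of signature files. The dumpTrainDir helper and
 * the "data/train" path are illustrative, not part of the project API.
 */
void
dumpTrainDir(Database& db, const string& dir)
{
    int n = 0;
    TrainData* data = db.loadTrainDataFromDir(dir, &n);

    for (int i = 0; i < n; i++)
        cout << data[i].name << " -> " << data[i].value << endl;

    delete[] data;   // the caller owns the array allocated with new[]
}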
bool
ModuleGenerator::finishFuncExports()
{
    // ModuleGenerator::exportedFuncs_ is an unordered HashSet. The
    // FuncExportVector stored in Metadata needs to be stored sorted by
    // function index to allow O(log(n)) lookup at runtime.

    Uint32Vector funcIndices;
    if (!funcIndices.reserve(exportedFuncs_.count()))
        return false;

    for (Uint32Set::Range r = exportedFuncs_.all(); !r.empty(); r.popFront())
        funcIndices.infallibleAppend(r.front());

    std::sort(funcIndices.begin(), funcIndices.end());

    MOZ_ASSERT(metadata_->funcExports.empty());
    if (!metadata_->funcExports.reserve(exportedFuncs_.count()))
        return false;

    for (uint32_t funcIndex : funcIndices) {
        Sig sig;
        if (!sig.clone(funcSig(funcIndex)))
            return false;

        metadata_->funcExports.infallibleEmplaceBack(Move(sig), funcIndex,
                                                     funcIndexToCodeRange_[funcIndex]);
    }

    return true;
}
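// A minimal sketch of the O(log(n)) runtime lookup that the sorted
// FuncExportVector enables. The lookupFuncExport helper and the funcIndex()
// accessor are assumptions made for illustration; they are not necessarily
// the actual Metadata API.
static const FuncExport*
lookupFuncExport(const FuncExportVector& funcExports, uint32_t funcIndex)
{
    // Binary search over the vector, which finishFuncExports() sorted by
    // function index.
    auto it = std::lower_bound(funcExports.begin(), funcExports.end(), funcIndex,
                               [](const FuncExport& fe, uint32_t index) {
                                   return fe.funcIndex() < index;
                               });
    if (it == funcExports.end() || it->funcIndex() != funcIndex)
        return nullptr;
    return &*it;
}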
int
NeuralNet::test(string filePath, double* prob)
{
    Sig testSig;
    double temp = 0.0;   // was uninitialized; set on the first loop iteration below
    double* test;
    int max = 0, ii;     // was uninitialized; defaults to node 0

    testSig.load(filePath);
    test = testSig.toArray();

    // Feed the test data forward through the net.
    bp->ffwd(test);

    for (ii = 0; ii < numRes; ii++) {
        prob[ii] = bp->Out(ii);
        if (ii == 0 || prob[ii] > temp) {
            // The output node with the highest value determines the actual
            // result, i.e. who the test signature belongs to.
            temp = prob[ii];
            max = ii;
        }
    }

    return max;
}
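/*
 * A minimal usage sketch for NeuralNet::test(), assuming an already-trained
 * net whose output layer has numClasses nodes. The classifySignature helper
 * and its parameter names are illustrative, not part of the project API.
 */
void
classifySignature(NeuralNet& net, int numClasses, const string& path)
{
    vector<double> prob(numClasses);
    int who = net.test(path, prob.data());
    cout << "best match: class " << who
         << " (output " << prob[who] << ")" << endl;
}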
bool
ModuleGenerator::addImport(const Sig& sig, uint32_t globalDataOffset)
{
    Sig copy;
    if (!copy.clone(sig))
        return false;

    return module_->imports.emplaceBack(Move(copy), globalDataOffset);
}
static bool
DecodeCallWithSig(FunctionDecoder& f, const Sig& sig, ExprType expected)
{
    for (ValType argType : sig.args()) {
        if (!DecodeExpr(f, ToExprType(argType)))
            return false;
    }

    return CheckType(f, sig.ret(), expected);
}
bool
ModuleGenerator::addFuncImport(const Sig& sig, uint32_t globalDataOffset)
{
    MOZ_ASSERT(!finishedFuncDefs_);

    Sig copy;
    if (!copy.clone(sig))
        return false;

    return metadata_->funcImports.emplaceBack(Move(copy), globalDataOffset);
}
static bool
CheckTypeForJS(JSContext* cx, Decoder& d, const Sig& sig)
{
    for (ValType argType : sig.args()) {
        if (argType == ValType::I64)
            return Fail(cx, d, "cannot import/export i64 argument");
    }

    if (sig.ret() == ExprType::I64)
        return Fail(cx, d, "cannot import/export i64 return type");

    return true;
}
QDebug
operator<<(QDebug dbg, Sig& sig)
{
    dbg.nospace() << "[mean=" << sig.mean()
                  << ",std=" << sig.stddev()
                  << ",wt=" << sig.weight();

    // Print via the derived histogram type if the cast succeeds, otherwise
    // fall back to the base histogram printer.
    DynamicHistogram* h = dynamic_cast<DynamicHistogram*>(sig.m_histogram);
    if (h)
        dbg.nospace() << ",h=" << *h;
    else
        dbg.nospace() << ",h=" << *(sig.m_histogram);

    dbg.nospace() << "]";
    return dbg.space();
}
static bool
DecodeCallWithSig(FunctionDecoder& f, const Sig& sig, ExprType* type)
{
    for (ValType argType : sig.args()) {
        ExprType exprType;
        if (!DecodeExpr(f, &exprType))
            return false;

        if (!CheckType(f, exprType, argType))
            return false;
    }

    *type = sig.ret();
    return true;
}
bool
ModuleGenerator::declareExport(uint32_t funcIndex, uint32_t* exportIndex)
{
    FuncIndexMap::AddPtr p = funcIndexToExport_.lookupForAdd(funcIndex);
    if (p) {
        *exportIndex = p->value();
        return true;
    }

    Sig copy;
    if (!copy.clone(funcSig(funcIndex)))
        return false;

    *exportIndex = module_->exports.length();
    return funcIndexToExport_.add(p, funcIndex, *exportIndex) &&
           module_->exports.append(Move(copy)) &&
           exportFuncIndices_.append(funcIndex);
}
bool
ModuleGenerator::finishFuncExports()
{
    // In addition to all the functions that were explicitly exported, any
    // element of an exported table is also exported.
    for (ElemSegment& elems : elemSegments_) {
        if (shared_->tables[elems.tableIndex].external) {
            for (uint32_t funcIndex : elems.elemFuncIndices) {
                if (!exportedFuncs_.put(funcIndex))
                    return false;
            }
        }
    }

    // ModuleGenerator::exportedFuncs_ is an unordered HashSet. The
    // FuncExportVector stored in Metadata needs to be stored sorted by
    // function index to allow O(log(n)) lookup at runtime.

    Uint32Vector sorted;
    if (!sorted.reserve(exportedFuncs_.count()))
        return false;

    for (Uint32Set::Range r = exportedFuncs_.all(); !r.empty(); r.popFront())
        sorted.infallibleAppend(r.front());

    std::sort(sorted.begin(), sorted.end());

    MOZ_ASSERT(metadata_->funcExports.empty());
    if (!metadata_->funcExports.reserve(sorted.length()))
        return false;

    for (uint32_t funcIndex : sorted) {
        Sig sig;
        if (!sig.clone(funcSig(funcIndex)))
            return false;

        uint32_t codeRangeIndex = funcToCodeRange_[funcIndex];
        metadata_->funcExports.infallibleEmplaceBack(Move(sig), funcIndex, codeRangeIndex);
    }

    return true;
}
static size_t
SizeOfSigExcludingThis(const Sig& sig, MallocSizeOf mallocSizeOf)
{
    return sig.args().sizeOfExcludingThis(mallocSizeOf);
}
static size_t
SerializedSigSize(const Sig& sig)
{
    return sizeof(ExprType) +
           SerializedPodVectorSize(sig.args());
}
bool
ModuleGenerator::init(UniqueModuleGeneratorData shared, const CompileArgs& args,
                      Metadata* maybeAsmJSMetadata)
{
    shared_ = Move(shared);
    alwaysBaseline_ = args.alwaysBaseline;

    if (!exportedFuncs_.init())
        return false;

    linkData_.globalDataLength = AlignBytes(InitialGlobalDataBytes, sizeof(void*));

    // asm.js passes in an AsmJSMetadata subclass to use instead.
    if (maybeAsmJSMetadata) {
        metadata_ = maybeAsmJSMetadata;
        MOZ_ASSERT(isAsmJS());
    } else {
        metadata_ = js_new<Metadata>();
        if (!metadata_)
            return false;
        MOZ_ASSERT(!isAsmJS());
    }

    if (args.scriptedCaller.filename) {
        metadata_->filename = DuplicateString(args.scriptedCaller.filename.get());
        if (!metadata_->filename)
            return false;
    }

    if (!metadata_->assumptions.clone(args.assumptions))
        return false;

    // For asm.js, the Vectors in ModuleGeneratorData are max-sized reservations
    // and will be initialized in a linear order via init* functions as the
    // module is generated. For wasm, the Vectors are correctly-sized and
    // already initialized.
    if (!isAsmJS()) {
        numSigs_ = shared_->sigs.length();
        numTables_ = shared_->tables.length();

        for (FuncImportGenDesc& funcImport : shared_->funcImports) {
            MOZ_ASSERT(!funcImport.globalDataOffset);
            funcImport.globalDataOffset = linkData_.globalDataLength;
            linkData_.globalDataLength += sizeof(FuncImportTls);
            if (!addFuncImport(*funcImport.sig, funcImport.globalDataOffset))
                return false;
        }

        for (const Import& import : imports_) {
            if (import.kind == DefinitionKind::Table) {
                MOZ_ASSERT(shared_->tables.length() == 1);
                shared_->tables[0].external = true;
                break;
            }
        }

        for (TableDesc& table : shared_->tables) {
            if (!allocateGlobalBytes(sizeof(void*), sizeof(void*), &table.globalDataOffset))
                return false;
        }

        for (uint32_t i = 0; i < numSigs_; i++) {
            SigWithId& sig = shared_->sigs[i];
            if (SigIdDesc::isGlobal(sig)) {
                uint32_t globalDataOffset;
                if (!allocateGlobalBytes(sizeof(void*), sizeof(void*), &globalDataOffset))
                    return false;

                sig.id = SigIdDesc::global(sig, globalDataOffset);

                Sig copy;
                if (!copy.clone(sig))
                    return false;

                if (!metadata_->sigIds.emplaceBack(Move(copy), sig.id))
                    return false;
            } else {
                sig.id = SigIdDesc::immediate(sig);
            }
        }

        for (GlobalDesc& global : shared_->globals) {
            if (global.isConstant())
                continue;
            if (!allocateGlobal(&global))
                return false;
        }
    } else {
        MOZ_ASSERT(shared_->sigs.length() == MaxSigs);
        MOZ_ASSERT(shared_->tables.length() == MaxTables);
        MOZ_ASSERT(shared_->asmJSSigToTableIndex.length() == MaxSigs);
    }

    return true;
}
// Generate a stub that enters wasm from a C++ caller via the native ABI. The
// signature of the entry point is Module::CodePtr. The exported wasm function
// has an ABI derived from its specific signature, so this function must map
// from the ABI of CodePtr to the export's signature's ABI.
Offsets
wasm::GenerateEntry(MacroAssembler& masm, unsigned target, const Sig& sig, bool usesHeap)
{
    masm.haltingAlign(CodeAlignment);

    Offsets offsets;
    offsets.begin = masm.currentOffset();

    // Save the return address if it wasn't already saved by the call insn.
#if defined(JS_CODEGEN_ARM)
    masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    masm.push(ra);
#elif defined(JS_CODEGEN_X86)
    static const unsigned EntryFrameSize = sizeof(void*);
#endif

    // Save all caller non-volatile registers before we clobber them here and
    // in the asm.js callee (which does not preserve non-volatile registers).
    masm.setFramePushed(0);
    masm.PushRegsInMask(NonVolatileRegs);
    MOZ_ASSERT(masm.framePushed() == FramePushedAfterSave);

    // ARM and MIPS/MIPS64 have a globally-pinned GlobalReg (x64 uses
    // RIP-relative addressing, x86 uses immediates in effective addresses).
    // For the AsmJSGlobalRegBias addition, see Assembler-(mips,arm).h.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    masm.movePtr(IntArgReg1, GlobalReg);
    masm.addPtr(Imm32(AsmJSGlobalRegBias), GlobalReg);
#endif

    // ARM, MIPS/MIPS64 and x64 have a globally-pinned HeapReg (x86 uses
    // immediates in effective addresses). Loading the heap register depends
    // on the global register already having been loaded.
    if (usesHeap)
        masm.loadAsmJSHeapRegisterFromGlobalData();

    // Put the 'argv' argument into a non-argument/return register so that we
    // can use 'argv' while we fill in the arguments for the asm.js callee.
    // Also, save 'argv' on the stack so that we can recover it after the call.
    // Use a second non-argument/return register as temporary scratch.
    Register argv = ABIArgGenerator::NonArgReturnReg0;
    Register scratch = ABIArgGenerator::NonArgReturnReg1;
#if defined(JS_CODEGEN_X86)
    masm.loadPtr(Address(masm.getStackPointer(), EntryFrameSize + masm.framePushed()), argv);
#else
    masm.movePtr(IntArgReg0, argv);
#endif
    masm.Push(argv);

    // Save the stack pointer to the saved non-volatile registers. We will use
    // this on two paths: normal return and exceptional return. Since
    // loadWasmActivation uses GlobalReg, we must do this after loading
    // GlobalReg.
    MOZ_ASSERT(masm.framePushed() == FramePushedForEntrySP);
    masm.loadWasmActivation(scratch);
    masm.storeStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));

    // Dynamically align the stack since ABIStackAlignment is not necessarily
    // AsmJSStackAlignment. We'll use entrySP to recover the original stack
    // pointer on return.
    masm.andToStackPtr(Imm32(~(AsmJSStackAlignment - 1)));

    // Bump the stack for the call.
    masm.reserveStack(AlignBytes(StackArgBytes(sig.args()), AsmJSStackAlignment));

    // Copy parameters out of argv and into the registers/stack-slots specified
    // by the system ABI.
    for (ABIArgValTypeIter iter(sig.args()); !iter.done(); iter++) {
        unsigned argOffset = iter.index() * Module::SizeOfEntryArg;
        Address src(argv, argOffset);
        MIRType type = iter.mirType();
        switch (iter->kind()) {
          case ABIArg::GPR:
            masm.load32(src, iter->gpr());
            break;
#ifdef JS_CODEGEN_REGISTER_PAIR
          case ABIArg::GPR_PAIR:
            MOZ_CRASH("wasm uses hardfp for function calls.");
            break;
#endif
          case ABIArg::FPU: {
            static_assert(Module::SizeOfEntryArg >= jit::Simd128DataSize,
                          "EntryArg must be big enough to store SIMD values");
            switch (type) {
              case MIRType_Int32x4:
              case MIRType_Bool32x4:
                masm.loadUnalignedInt32x4(src, iter->fpu());
                break;
              case MIRType_Float32x4:
                masm.loadUnalignedFloat32x4(src, iter->fpu());
                break;
              case MIRType_Double:
                masm.loadDouble(src, iter->fpu());
                break;
              case MIRType_Float32:
                masm.loadFloat32(src, iter->fpu());
                break;
              default:
                MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
                break;
            }
            break;
          }
          case ABIArg::Stack:
            switch (type) {
              case MIRType_Int32:
                masm.load32(src, scratch);
                masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType_Double:
                masm.loadDouble(src, ScratchDoubleReg);
                masm.storeDouble(ScratchDoubleReg,
                                 Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType_Float32:
                masm.loadFloat32(src, ScratchFloat32Reg);
                masm.storeFloat32(ScratchFloat32Reg,
                                  Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType_Int32x4:
              case MIRType_Bool32x4:
                masm.loadUnalignedInt32x4(src, ScratchSimd128Reg);
                masm.storeAlignedInt32x4(ScratchSimd128Reg,
                                         Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              case MIRType_Float32x4:
                masm.loadUnalignedFloat32x4(src, ScratchSimd128Reg);
                masm.storeAlignedFloat32x4(ScratchSimd128Reg,
                                           Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                break;
              default:
                MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
            }
            break;
        }
    }

    // Call into the real function.
    masm.assertStackAlignment(AsmJSStackAlignment);
    masm.call(CallSiteDesc(CallSiteDesc::Relative), AsmJSInternalCallee(target));

    // Recover the stack pointer value before dynamic alignment.
    masm.loadWasmActivation(scratch);
    masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
    masm.setFramePushed(FramePushedForEntrySP);

    // Recover the 'argv' pointer which was saved before aligning the stack.
    masm.Pop(argv);

    // Store the return value in argv[0].
    switch (sig.ret()) {
      case ExprType::Void:
        break;
      case ExprType::I32:
        masm.storeValue(JSVAL_TYPE_INT32, ReturnReg, Address(argv, 0));
        break;
      case ExprType::I64:
        MOZ_CRASH("no int64 in asm.js");
      case ExprType::F32:
        masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
        MOZ_FALLTHROUGH; // ReturnDoubleReg now contains a Double
      case ExprType::F64:
        masm.canonicalizeDouble(ReturnDoubleReg);
        masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
        break;
      case ExprType::I32x4:
      case ExprType::B32x4:
        // We have no control over argv alignment, so do an unaligned access.
        masm.storeUnalignedInt32x4(ReturnSimd128Reg, Address(argv, 0));
        break;
      case ExprType::F32x4:
        // We have no control over argv alignment, so do an unaligned access.
        masm.storeUnalignedFloat32x4(ReturnSimd128Reg, Address(argv, 0));
        break;
      case ExprType::Limit:
        MOZ_CRASH("Limit");
    }

    // Restore clobbered non-volatile registers of the caller.
    masm.PopRegsInMask(NonVolatileRegs);
    MOZ_ASSERT(masm.framePushed() == 0);

    masm.move32(Imm32(true), ReturnReg);
    masm.ret();

    offsets.end = masm.currentOffset();
    return offsets;
}