namespace jacobi_smp {

    vector<double> matrix1;
    vector<double> matrix2;

    void jacobi_kernel_wrapper(range const & y_range, size_t n,
        vector<double> & dst, vector<double> const & src)
    {
        for (size_t y = y_range.begin(); y < y_range.end(); ++y) {
            double * dst_ptr = &dst[y * n];
            const double * src_ptr = &src[y * n];
            jacobi_kernel(dst_ptr, src_ptr, n);
        }
    }

    void jacobi_serial(size_t n, size_t iterations, std::string output_filename)
    {
        hpx::util::high_resolution_timer t;
        matrix1.resize(n * n);
        matrix2.resize(n * n);
        for (size_t iter = 1; iter < iterations; ++iter) {
            // Bottom-left corner.
            matrix2[0] = (matrix1[0] + matrix1[1] + matrix1[n]) / 3;
            // Bottom row.
            for (size_t i = 1; i < n - 1; i++) {
                matrix2[i] = (matrix1[i] + matrix1[i-1] + matrix1[i+1]
                    + matrix1[i+n]) * .25;
            }
            // Bottom-right corner.
            matrix2[n-1] = (matrix1[n-1] + matrix1[n-2] + matrix1[n+n-1]) / 3;
            for (size_t i = 1; i < n - 1; i++) {
                // Left column.
                matrix2[i*n] = (matrix1[i*n] + matrix1[i*n+1]
                    + matrix1[(i-1)*n] + matrix1[(i+1)*n]) * .25;
                // Interior points: five-point stencil.
                for (size_t j = 1; j < n - 1; j++) {
                    matrix2[i*n+j] = (matrix1[i*n+j]
                        + matrix1[i*n+j-1] + matrix1[i*n+j+1]
                        + matrix1[(i-1)*n+j] + matrix1[(i+1)*n+j]) * .2;
                }
                // Right column.
                matrix2[i*n+n-1] = (matrix1[i*n+n-1] + matrix1[i*n+n-2]
                    + matrix1[i*n-1] + matrix1[(i+2)*n-1]) * .25;
            }
            // Top-left corner.
            matrix2[(n-1)*n] = (matrix1[(n-1)*n] + matrix1[(n-1)*n+1]
                + matrix1[(n-2)*n]) / 3;
            // Top row.
            for (size_t i = 1; i < n - 1; i++) {
                matrix2[(n-1)*n+i] = (matrix1[(n-1)*n+i]
                    + matrix1[(n-1)*n+i-1] + matrix1[(n-1)*n+i+1]
                    + matrix1[(n-2)*n+i]) * .25;
            }
            // Top-right corner.
            matrix2[n*n-1] = (matrix1[n*n-1] + matrix1[n*n-2]
                + matrix1[n*n-1-n]) / 3;
            // Exchange grids so the next iteration reads the values just
            // written.
            std::swap(matrix1, matrix2);
        }
        report_timing(n, iterations, t.elapsed());
        //output_grid(output_filename, *grid_old, n);
    }

    block jacobi_kernel_mid(block previous, block left, block right,
        block below, block above)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                // Five-point stencil; the rows below and above are at
                // (i-1)*n + j and (i+1)*n + j.
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j-1] + previous.src[i*n + j+1]
                    + previous.src[(i-1)*n + j]
                    + previous.src[(i+1)*n + j]) * 0.2;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    block jacobi_kernel_top(block previous, block left, block right,
        block below)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j-1] + previous.src[i*n + j+1]
                    + previous.src[(i-1)*n + j]) * 0.25;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    block jacobi_kernel_bot(block previous, block left, block right,
        block above)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j-1] + previous.src[i*n + j+1]
                    + previous.src[(i+1)*n + j]) * 0.25;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    block jacobi_kernel_left(block previous, block right, block below,
        block above)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j+1]
                    + previous.src[(i-1)*n + j]
                    + previous.src[(i+1)*n + j]) * 0.25;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    block jacobi_kernel_right(block previous, block left, block below,
        block above)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j-1]
                    + previous.src[(i-1)*n + j]
                    + previous.src[(i+1)*n + j]) * 0.25;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    block jacobi_kernel_TL(block previous, block right, block below)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j+1]
                    + previous.src[(i-1)*n + j]) / 3.0;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    block jacobi_kernel_TR(block previous, block left, block below)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j-1]
                    + previous.src[(i-1)*n + j]) / 3.0;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    block jacobi_kernel_BL(block previous, block right, block above)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j+1]
                    + previous.src[(i+1)*n + j]) / 3.0;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    block jacobi_kernel_BR(block previous, block left, block above)
    {
        size_t block_size = previous.block_size;
        size_t n = previous.matrix_size;
        for (size_t i = previous.row; i < previous.row + block_size; i++) {
            for (size_t j = previous.col; j < previous.col + block_size; j++) {
                previous.dest[i*n + j] = (previous.src[i*n + j]
                    + previous.src[i*n + j-1]
                    + previous.src[(i+1)*n + j]) / 3.0;
            }
        }
        std::swap(previous.src, previous.dest);
        return previous;
    }

    auto jacobi_op    = unwrapped(&jacobi_kernel_mid);
    auto jacobi_bot   = unwrapped(&jacobi_kernel_bot);
    auto jacobi_top   = unwrapped(&jacobi_kernel_top);
    auto jacobi_left  = unwrapped(&jacobi_kernel_left);
    auto jacobi_right = unwrapped(&jacobi_kernel_right);
    auto jacobi_BL    = unwrapped(&jacobi_kernel_BL);
    auto jacobi_BR    = unwrapped(&jacobi_kernel_BR);
    auto jacobi_TL    = unwrapped(&jacobi_kernel_TL);
    auto jacobi_TR    = unwrapped(&jacobi_kernel_TR);

    void block_init(vector< vector<block> > & blockList,
        size_t block_size, size_t matrix_size)
    {
        size_t numBlocks =
            static_cast<size_t>(std::ceil(double(matrix_size) / block_size));
        size_t remainder = matrix_size % block_size;
        if (remainder == 0) {
            remainder = block_size;
        }
        matrix1.resize(matrix_size * matrix_size);
        matrix2.resize(matrix_size * matrix_size);
        blockList.resize(numBlocks);
        for (size_t i = 0; i < numBlocks; i++) {
            blockList[i].resize(numBlocks);
            for (size_t j = 0; j < numBlocks; j++) {
                blockList[i][j].matrix_size = matrix_size;
                // Blocks in the last row/column may be smaller.
                if (i == numBlocks - 1 || j == numBlocks - 1) {
                    blockList[i][j].block_size = remainder;
                } else {
                    blockList[i][j].block_size = block_size;
                }
                blockList[i][j].dest = matrix2.data();
                blockList[i][j].src = matrix1.data();
                blockList[i][j].col = j * block_size;
                blockList[i][j].row = i * block_size;
            }
        }
    }

    void jacobi_init(
        vector< vector< vector< hpx::shared_future<block> > > > & futureList,
        size_t n, size_t block_size)
    {
        vector< vector<block> > blockList;
        block_init(blockList, block_size, n);
        size_t numBlocks = blockList.size();

        futureList[0].resize(numBlocks);
        futureList[1].resize(numBlocks);
        for (size_t i = 0; i < numBlocks; i++) {
            futureList[0][i].resize(numBlocks);
            futureList[1][i].resize(numBlocks);
        }

        // Futures are indexed [row][column] throughout; the first iteration
        // is launched eagerly with async.
        const size_t curr = 1;
        // Bottom-left corner.
        futureList[curr][0][0] = async(jacobi_kernel_BL,
            blockList[0][0], blockList[0][1], blockList[1][0]);
        // Left column.
        for (size_t j = 1; j < numBlocks - 1; j++) {
            futureList[curr][j][0] = async(jacobi_kernel_left,
                blockList[j][0], blockList[j][1],
                blockList[j-1][0], blockList[j+1][0]);
        }
        // Top-left corner.
        futureList[curr][numBlocks-1][0] = async(jacobi_kernel_TL,
            blockList[numBlocks-1][0], blockList[numBlocks-1][1],
            blockList[numBlocks-2][0]);
        for (size_t j = 1; j < numBlocks - 1; j++) {
            // Bottom row.
            futureList[curr][0][j] = async(jacobi_kernel_bot,
                blockList[0][j], blockList[0][j-1], blockList[0][j+1],
                blockList[1][j]);
            // Interior blocks (row k, column j).
            for (size_t k = 1; k < numBlocks - 1; k++) {
                futureList[curr][k][j] = async(jacobi_kernel_mid,
                    blockList[k][j], blockList[k][j-1], blockList[k][j+1],
                    blockList[k-1][j], blockList[k+1][j]);
            }
            // Top row.
            futureList[curr][numBlocks-1][j] = async(jacobi_kernel_top,
                blockList[numBlocks-1][j], blockList[numBlocks-1][j-1],
                blockList[numBlocks-1][j+1], blockList[numBlocks-2][j]);
        }
        // Bottom-right corner.
        futureList[curr][0][numBlocks-1] = async(jacobi_kernel_BR,
            blockList[0][numBlocks-1], blockList[0][numBlocks-2],
            blockList[1][numBlocks-1]);
        // Right column.
        for (size_t j = 1; j < numBlocks - 1; j++) {
            futureList[curr][j][numBlocks-1] = async(jacobi_kernel_right,
                blockList[j][numBlocks-1], blockList[j][numBlocks-2],
                blockList[j-1][numBlocks-1], blockList[j+1][numBlocks-1]);
        }
        // Top-right corner.
        futureList[curr][numBlocks-1][numBlocks-1] = async(jacobi_kernel_TR,
            blockList[numBlocks-1][numBlocks-1],
            blockList[numBlocks-1][numBlocks-2],
            blockList[numBlocks-2][numBlocks-1]);
    }

    void jacobi(size_t n, size_t iterations, size_t block_size,
        std::string output_filename)
    {
        hpx::util::high_resolution_timer t;
        vector< vector< vector< shared_future<block> > > > blockList(2);
        jacobi_init(blockList, n, block_size);
        size_t numBlocks = blockList[0].size();

        for (size_t i = 1; i < iterations; ++i) {
            const size_t prev = i % 2;
            const size_t curr = (i + 1) % 2;
            // Bottom-left corner.
            blockList[curr][0][0] = dataflow(jacobi_BL,
                blockList[prev][0][0], blockList[prev][0][1],
                blockList[prev][1][0]);
            // Left column.
            for (size_t j = 1; j < numBlocks - 1; j++) {
                blockList[curr][j][0] = dataflow(jacobi_left,
                    blockList[prev][j][0], blockList[prev][j][1],
                    blockList[prev][j-1][0], blockList[prev][j+1][0]);
            }
            // Top-left corner.
            blockList[curr][numBlocks-1][0] = dataflow(jacobi_TL,
                blockList[prev][numBlocks-1][0],
                blockList[prev][numBlocks-1][1],
                blockList[prev][numBlocks-2][0]);
            for (size_t j = 1; j < numBlocks - 1; j++) {
                // Bottom row.
                blockList[curr][0][j] = dataflow(jacobi_bot,
                    blockList[prev][0][j], blockList[prev][0][j-1],
                    blockList[prev][0][j+1], blockList[prev][1][j]);
                // Interior blocks: each depends on itself and its four
                // neighbours from the previous iteration.
                for (size_t k = 1; k < numBlocks - 1; k++) {
                    blockList[curr][k][j] = dataflow(jacobi_op,
                        blockList[prev][k][j],
                        blockList[prev][k][j-1], blockList[prev][k][j+1],
                        blockList[prev][k-1][j], blockList[prev][k+1][j]);
                }
                // Top row.
                blockList[curr][numBlocks-1][j] = dataflow(jacobi_top,
                    blockList[prev][numBlocks-1][j],
                    blockList[prev][numBlocks-1][j-1],
                    blockList[prev][numBlocks-1][j+1],
                    blockList[prev][numBlocks-2][j]);
            }
            // Bottom-right corner.
            blockList[curr][0][numBlocks-1] = dataflow(jacobi_BR,
                blockList[prev][0][numBlocks-1],
                blockList[prev][0][numBlocks-2],
                blockList[prev][1][numBlocks-1]);
            // Right column.
            for (size_t j = 1; j < numBlocks - 1; j++) {
                blockList[curr][j][numBlocks-1] = dataflow(jacobi_right,
                    blockList[prev][j][numBlocks-1],
                    blockList[prev][j][numBlocks-2],
                    blockList[prev][j-1][numBlocks-1],
                    blockList[prev][j+1][numBlocks-1]);
            }
            // Top-right corner.
            blockList[curr][numBlocks-1][numBlocks-1] = dataflow(jacobi_TR,
                blockList[prev][numBlocks-1][numBlocks-1],
                blockList[prev][numBlocks-1][numBlocks-2],
                blockList[prev][numBlocks-2][numBlocks-1]);
        }

        // Wait for the futures written in the final iteration.
        const size_t last = iterations % 2;
        for (size_t i = 0; i < blockList[last].size(); i++) {
            hpx::wait_all(blockList[last][i]);
        }
        report_timing(n, iterations, t.elapsed());
        //output_grid(output_filename, *grid_old, n);
    }
}
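// A minimal usage sketch (not part of the original file), assuming the
// usual HPX startup pattern: hpx::init bootstraps the runtime and invokes
// hpx_main, where the blocked solver above can be launched. The grid size,
// iteration count, block size and output name are illustrative
// placeholders.
#include <hpx/hpx_init.hpp>

int hpx_main()
{
    // 1024x1024 grid, 100 iterations, 64x64 blocks.
    jacobi_smp::jacobi(1024, 100, 64, "jacobi.dat");
    return hpx::finalize();
}

int main(int argc, char* argv[])
{
    return hpx::init(argc, argv); // returns once hpx_main has finished
}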
bool
InterposeProperty(JSContext* cx, HandleObject target, const nsIID* iid,
                  HandleId id, MutableHandle<JSPropertyDescriptor> descriptor)
{
    // We only want to do interposition on DOM instances and
    // wrapped natives.
    RootedObject unwrapped(cx, UncheckedUnwrap(target));
    const js::Class* clasp = js::GetObjectClass(unwrapped);
    bool isCPOW = jsipc::IsWrappedCPOW(unwrapped);
    if (!mozilla::dom::IsDOMClass(clasp) &&
        !IS_WN_CLASS(clasp) &&
        !IS_PROTO_CLASS(clasp) &&
        clasp != &OuterWindowProxyClass &&
        !isCPOW) {
        return true;
    }

    XPCWrappedNativeScope* scope = ObjectScope(CurrentGlobalOrNull(cx));
    MOZ_ASSERT(scope->HasInterposition());

    nsCOMPtr<nsIAddonInterposition> interp = scope->GetInterposition();
    InterpositionWhitelist* wl =
        XPCWrappedNativeScope::GetInterpositionWhitelist(interp);
    // We do InterposeProperty only if the id is on the whitelist of the
    // interposition or if the target is a CPOW.
    if ((!wl || !wl->has(JSID_BITS(id.get()))) && !isCPOW)
        return true;

    JSAddonId* addonId = AddonIdOfObject(target);
    RootedValue addonIdValue(cx, StringValue(StringOfAddonId(addonId)));
    RootedValue prop(cx, IdToValue(id));
    RootedValue targetValue(cx, ObjectValue(*target));
    RootedValue descriptorVal(cx);
    nsresult rv = interp->InterposeProperty(addonIdValue, targetValue,
                                            iid, prop, &descriptorVal);
    if (NS_FAILED(rv)) {
        xpc::Throw(cx, rv);
        return false;
    }

    if (!descriptorVal.isObject())
        return true;

    // We need to be careful parsing descriptorVal. |cx| is in the compartment
    // of the add-on and the descriptor is in the compartment of the
    // interposition. We could wrap the descriptor in the add-on's compartment
    // and then parse it. However, parsing the descriptor fetches properties
    // from it, and we would try to interpose on those property accesses. So
    // instead we parse in the interposition's compartment and then wrap the
    // descriptor.
    {
        JSAutoCompartment ac(cx, &descriptorVal.toObject());
        if (!JS::ObjectToCompletePropertyDescriptor(cx, target, descriptorVal,
                                                    descriptor))
            return false;
    }

    // Always make the property non-configurable regardless of what the
    // interposition wants.
    descriptor.setAttributes(descriptor.attributes() | JSPROP_PERMANENT);

    if (!JS_WrapPropertyDescriptor(cx, descriptor))
        return false;

    return true;
}
void
WrapperPromiseCallback::Call(JSContext* aCx, JS::Handle<JS::Value> aValue)
{
  JSAutoCompartment ac(aCx, mGlobal);
  JS::Rooted<JS::Value> value(aCx, aValue);
  if (!JS_WrapValue(aCx, &value)) {
    NS_WARNING("Failed to wrap value into the right compartment.");
    return;
  }

  ErrorResult rv;

  // If invoking callback threw an exception, run resolver's reject with the
  // thrown exception as argument and the synchronous flag set.
  JS::Rooted<JS::Value> retValue(aCx);
  mCallback->Call(value, &retValue, rv, CallbackObject::eRethrowExceptions);

  rv.WouldReportJSException();

  if (rv.Failed() && rv.IsJSException()) {
    JS::Rooted<JS::Value> value(aCx);
    rv.StealJSException(aCx, &value);
    if (!JS_WrapValue(aCx, &value)) {
      NS_WARNING("Failed to wrap value into the right compartment.");
      return;
    }
    mNextPromise->RejectInternal(aCx, value, Promise::SyncTask);
    return;
  }

  // If the return value is the same as the promise itself, throw TypeError.
  if (retValue.isObject()) {
    JS::Rooted<JSObject*> valueObj(aCx, &retValue.toObject());
    Promise* returnedPromise;
    nsresult r = UNWRAP_OBJECT(Promise, valueObj, returnedPromise);

    if (NS_SUCCEEDED(r) && returnedPromise == mNextPromise) {
      const char* fileName = nullptr;
      uint32_t lineNumber = 0;

      // Try to get some information about the callback to report a sane
      // error, but don't try too hard (only deals with scripted functions).
      JS::Rooted<JSObject*> unwrapped(aCx,
        js::CheckedUnwrap(mCallback->Callback()));

      if (unwrapped) {
        JSAutoCompartment ac(aCx, unwrapped);
        if (JS_ObjectIsFunction(aCx, unwrapped)) {
          JS::Rooted<JS::Value> asValue(aCx, JS::ObjectValue(*unwrapped));
          JS::Rooted<JSFunction*> func(aCx, JS_ValueToFunction(aCx, asValue));
          MOZ_ASSERT(func);
          JSScript* script = JS_GetFunctionScript(aCx, func);
          if (script) {
            fileName = JS_GetScriptFilename(script);
            lineNumber = JS_GetScriptBaseLineNumber(aCx, script);
          }
        }
      }

      // We're back in aValue's compartment here.
      JS::Rooted<JSString*> stack(aCx, JS_GetEmptyString(JS_GetRuntime(aCx)));
      JS::Rooted<JSString*> fn(aCx, JS_NewStringCopyZ(aCx, fileName));
      if (!fn) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return;
      }

      JS::Rooted<JSString*> message(aCx,
        JS_NewStringCopyZ(aCx,
          "then() cannot return same Promise that it resolves."));
      if (!message) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return;
      }

      JS::Rooted<JS::Value> typeError(aCx);
      if (!JS::CreateTypeError(aCx, stack, fn, lineNumber, 0, nullptr,
                               message, &typeError)) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return;
      }

      mNextPromise->RejectInternal(aCx, typeError, Promise::SyncTask);
      return;
    }
  }

  // Otherwise, run resolver's resolve with value and the synchronous flag
  // set.
  if (!JS_WrapValue(aCx, &retValue)) {
    NS_WARNING("Failed to wrap value into the right compartment.");
    return;
  }

  mNextPromise->ResolveInternal(aCx, retValue, Promise::SyncTask);
}
static void member_declaration_list(struct typetree *type)
{
    struct namespace ns = {0};
    struct typetree *decl_base, *decl_type;
    const char *name;

    push_scope(&ns);

    do {
        decl_base = declaration_specifiers(NULL);

        do {
            name = NULL;
            decl_type = declarator(decl_base, &name);

            if (!name) {
                error("Missing name in member declarator.");
                exit(1);
            } else if (!size_of(decl_type)) {
                error("Field '%s' has incomplete type '%t'.",
                    name, decl_type);
                exit(1);
            } else {
                sym_add(&ns, name, decl_type, SYM_DECLARATION, LINK_NONE);
                type_add_member(type, name, decl_type);
            }

            if (peek().token == ',') {
                consume(',');
                continue;
            }
        } while (peek().token != ';');

        consume(';');
    } while (peek().token != '}');

    pop_scope(&ns);
}

static struct typetree *struct_or_union_declaration(void)
{
    struct symbol *sym = NULL;
    struct typetree *type = NULL;
    enum type kind = (next().token == STRUCT) ? T_STRUCT : T_UNION;

    if (peek().token == IDENTIFIER) {
        const char *name = consume(IDENTIFIER).strval;

        sym = sym_lookup(&ns_tag, name);
        if (!sym) {
            type = type_init(kind);
            sym = sym_add(&ns_tag, name, type, SYM_TYPEDEF, LINK_NONE);
        } else if (is_integer(&sym->type)) {
            error("Tag '%s' was previously declared as enum.", sym->name);
            exit(1);
        } else if (sym->type.type != kind) {
            error("Tag '%s' was previously declared as %s.",
                sym->name, (sym->type.type == T_STRUCT) ? "struct" : "union");
            exit(1);
        }

        /* Retrieve type from existing symbol, possibly providing a complete
         * definition that will be available for later declarations.
         * Overwrites existing type information from symbol table. */
        type = &sym->type;
        if (peek().token == '{' && type->size) {
            error("Redefinition of '%s'.", sym->name);
            exit(1);
        }
    }

    if (peek().token == '{') {
        if (!type) {
            /* Anonymous structure; allocate a new standalone type,
             * not part of any symbol. */
            type = type_init(kind);
        }

        consume('{');
        member_declaration_list(type);
        assert(type->size);
        consume('}');
    }

    /* Return to the caller a copy of the root node, which can be overwritten
     * with new type qualifiers without altering the tag registration. */
    return (sym) ? type_tagged_copy(&sym->type, sym->name) : type;
}

static void enumerator_list(void)
{
    struct var val;
    struct symbol *sym;
    int enum_value = 0;

    consume('{');
    do {
        const char *name = consume(IDENTIFIER).strval;

        if (peek().token == '=') {
            consume('=');
            val = constant_expression();
            if (!is_integer(val.type)) {
                error("Implicit conversion from non-integer type in enum.");
            }
            enum_value = val.imm.i;
        }

        sym = sym_add(
            &ns_ident, name, &basic_type__int, SYM_ENUM_VALUE, LINK_NONE);
        sym->enum_value = enum_value++;

        if (peek().token != ',')
            break;
        consume(',');
    } while (peek().token != '}');
    consume('}');
}

static struct typetree *enum_declaration(void)
{
    struct typetree *type = type_init(T_SIGNED, 4);

    consume(ENUM);
    if (peek().token == IDENTIFIER) {
        struct symbol *tag = NULL;
        const char *name = consume(IDENTIFIER).strval;

        tag = sym_lookup(&ns_tag, name);
        if (!tag || tag->depth < ns_tag.current_depth) {
            tag = sym_add(&ns_tag, name, type, SYM_TYPEDEF, LINK_NONE);
        } else if (!is_integer(&tag->type)) {
            error("Tag '%s' was previously defined as aggregate type.",
                tag->name);
            exit(1);
        }

        /* Use enum_value as a sentinel to represent definition, checked on
         * lookup to detect duplicate definitions. */
        if (peek().token == '{') {
            if (tag->enum_value) {
                error("Redefinition of enum '%s'.", tag->name);
            }
            enumerator_list();
            tag->enum_value = 1;
        }
    } else {
        enumerator_list();
    }

    /* Result is always integer. Do not care about the actual enum
     * definition, all enums are ints and no type checking is done. */
    return type;
}

static struct typetree get_basic_type_from_specifier(unsigned short spec)
{
    switch (spec) {
    case 0x0001: /* void */
        return basic_type__void;
    case 0x0002: /* char */
    case 0x0012: /* signed char */
        return basic_type__char;
    case 0x0022: /* unsigned char */
        return basic_type__unsigned_char;
    case 0x0004: /* short */
    case 0x0014: /* signed short */
    case 0x000C: /* short int */
    case 0x001C: /* signed short int */
        return basic_type__short;
    case 0x0024: /* unsigned short */
    case 0x002C: /* unsigned short int */
        return basic_type__unsigned_short;
    case 0x0008: /* int */
    case 0x0010: /* signed */
    case 0x0018: /* signed int */
        return basic_type__int;
    case 0x0020: /* unsigned */
    case 0x0028: /* unsigned int */
        return basic_type__unsigned_int;
    case 0x0040: /* long */
    case 0x0050: /* signed long */
    case 0x0048: /* long int */
    case 0x0058: /* signed long int */
    case 0x00C0: /* long long */
    case 0x00C8: /* long long int */
    case 0x00D0: /* signed long long */
    case 0x00D8: /* signed long long int */
        return basic_type__long;
    case 0x0060: /* unsigned long */
    case 0x0068: /* unsigned long int */
    case 0x00E0: /* unsigned long long */
    case 0x00E8: /* unsigned long long int */
        return basic_type__unsigned_long;
    case 0x0100: /* float */
        return basic_type__float;
    case 0x0200: /* double */
    case 0x0240: /* long double */
        return basic_type__double;
    default:
        error("Invalid type specification.");
        exit(1);
    }
}

/* Parse type, qualifiers and storage class. Do not assume int by default,
 * but require at least one type specifier. Storage class is returned as
 * token value, unless the provided pointer is NULL, in which case the input
 * is parsed as specifier-qualifier-list. */
struct typetree *declaration_specifiers(int *stc)
{
    struct typetree *type = NULL;
    struct token tok;
    int done = 0;

    /* Use a compact bit representation to hold state about declaration
     * specifiers. Initialize storage class to sentinel value. */
    unsigned short spec = 0x0000;
    enum qualifier qual = Q_NONE;
    if (stc) *stc = '$';

#define set_specifier(d) \
    if (spec & d) error("Duplicate type specifier '%s'.", tok.strval); \
    next(); spec |= d;

#define set_qualifier(d) \
    if (qual & d) error("Duplicate type qualifier '%s'.", tok.strval); \
    next(); qual |= d;

#define set_storage_class(t) \
    if (!stc) error("Unexpected storage class in qualifier list."); \
    else if (*stc != '$') error("Multiple storage class specifiers."); \
    next(); *stc = t;

    do {
        switch ((tok = peek()).token) {
        case VOID:      set_specifier(0x001); break;
        case CHAR:      set_specifier(0x002); break;
        case SHORT:     set_specifier(0x004); break;
        case INT:       set_specifier(0x008); break;
        case SIGNED:    set_specifier(0x010); break;
        case UNSIGNED:  set_specifier(0x020); break;
        case LONG:
            if (spec & 0x040) {
                set_specifier(0x080);
            } else {
                set_specifier(0x040);
            }
            break;
        case FLOAT:     set_specifier(0x100); break;
        case DOUBLE:    set_specifier(0x200); break;
        case CONST:     set_qualifier(Q_CONST); break;
        case VOLATILE:  set_qualifier(Q_VOLATILE); break;
        case IDENTIFIER: {
            struct symbol *tag = sym_lookup(&ns_ident, tok.strval);
            if (tag && tag->symtype == SYM_TYPEDEF && !type) {
                consume(IDENTIFIER);
                type = type_init(T_STRUCT);
                *type = tag->type;
            } else {
                done = 1;
            }
            break;
        }
        case UNION:
        case STRUCT:
            if (!type) {
                type = struct_or_union_declaration();
            } else {
                done = 1;
            }
            break;
        case ENUM:
            if (!type) {
                type = enum_declaration();
            } else {
                done = 1;
            }
            break;
        case AUTO:
        case REGISTER:
        case STATIC:
        case EXTERN:
        case TYPEDEF:
            set_storage_class(tok.token);
            break;
        default:
            done = 1;
            break;
        }

        if (type && spec) {
            error("Invalid combination of declaration specifiers.");
            exit(1);
        }
    } while (!done);

#undef set_specifier
#undef set_qualifier
#undef set_storage_class

    if (type) {
        if (qual & type->qualifier) {
            error("Duplicate type qualifier:%s%s.",
                (qual & Q_CONST) ? " const" : "",
                (qual & Q_VOLATILE) ? " volatile" : "");
        }
    } else if (spec) {
        type = type_init(T_STRUCT);
        *type = get_basic_type_from_specifier(spec);
    } else {
        error("Missing type specifier.");
        exit(1);
    }

    type->qualifier |= qual;
    return type;
}

/* Set var = 0, using simple assignment on members for composite types. This
 * rule does not consume any input, but generates a series of assignments on
 * the given variable. Point is to be able to zero initialize using normal
 * simple assignment rules, although IR can become verbose for large
 * structures. */
static void zero_initialize(struct block *block, struct var target)
{
    int i;
    struct var var;

    assert(target.kind == DIRECT);
    switch (target.type->type) {
    case T_STRUCT:
    case T_UNION:
        target.type = unwrapped(target.type);
        var = target;
        for (i = 0; i < nmembers(var.type); ++i) {
            target.type = get_member(var.type, i)->type;
            target.offset = var.offset + get_member(var.type, i)->offset;
            zero_initialize(block, target);
        }
        break;
    case T_ARRAY:
        assert(target.type->size);
        var = target;
        target.type = target.type->next;
        assert(is_struct(target.type) || !target.type->next);
        for (i = 0; i < var.type->size / var.type->next->size; ++i) {
            target.offset = var.offset + i * var.type->next->size;
            zero_initialize(block, target);
        }
        break;
    case T_POINTER:
        var = var_zero(8);
        var.type = type_init(T_POINTER, &basic_type__void);
        eval_assign(block, target, var);
        break;
    case T_UNSIGNED:
    case T_SIGNED:
        var = var_zero(target.type->size);
        eval_assign(block, target, var);
        break;
    default:
        error("Invalid type to zero-initialize, was '%t'.", target.type);
        exit(1);
    }
}
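/* A standalone sketch (not part of the original source) illustrating the
 * bit encoding consumed by get_basic_type_from_specifier above: each type
 * keyword contributes one bit, and the OR of all keywords seen selects a
 * case label. The mask names here are hypothetical; the compiler itself
 * only uses the raw constants. */
#include <stdio.h>

enum {
    SPEC_VOID     = 0x001,
    SPEC_CHAR     = 0x002,
    SPEC_SHORT    = 0x004,
    SPEC_INT      = 0x008,
    SPEC_SIGNED   = 0x010,
    SPEC_UNSIGNED = 0x020,
    SPEC_LONG     = 0x040,
    SPEC_LONGLONG = 0x080,
    SPEC_FLOAT    = 0x100,
    SPEC_DOUBLE   = 0x200
};

int main(void)
{
    /* "unsigned long int" => 0x020 | 0x040 | 0x008 = 0x068, which is the
     * 0x0068 case returning basic_type__unsigned_long. */
    unsigned short spec = SPEC_UNSIGNED | SPEC_LONG | SPEC_INT;
    printf("0x%04X\n", (unsigned) spec);
    return 0;
}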
void
WrapperPromiseCallback::Call(JS::Handle<JS::Value> aValue)
{
  // AutoCxPusher and co. interact with xpconnect, which crashes on
  // workers. On workers we'll get the right context from
  // GetDefaultJSContextForThread(), and since there is only one context,
  // we don't need to push or pop it from the stack.
  JSContext* cx = nsContentUtils::GetDefaultJSContextForThread();

  Maybe<AutoCxPusher> pusher;
  if (NS_IsMainThread()) {
    pusher.construct(cx);
  }

  Maybe<JSAutoCompartment> ac;
  EnterCompartment(ac, cx, aValue);

  ErrorResult rv;

  // If invoking callback threw an exception, run resolver's reject with the
  // thrown exception as argument and the synchronous flag set.
  JS::Rooted<JS::Value> value(cx,
    mCallback->Call(aValue, rv, CallbackObject::eRethrowExceptions));

  rv.WouldReportJSException();

  if (rv.Failed() && rv.IsJSException()) {
    JS::Rooted<JS::Value> value(cx);
    rv.StealJSException(cx, &value);

    Maybe<JSAutoCompartment> ac2;
    EnterCompartment(ac2, cx, value);
    mNextPromise->RejectInternal(cx, value, Promise::SyncTask);
    return;
  }

  // If the return value is the same as the promise itself, throw TypeError.
  if (value.isObject()) {
    JS::Rooted<JSObject*> valueObj(cx, &value.toObject());
    Promise* returnedPromise;
    nsresult r = UNWRAP_OBJECT(Promise, valueObj, returnedPromise);

    if (NS_SUCCEEDED(r) && returnedPromise == mNextPromise) {
      const char* fileName = nullptr;
      uint32_t lineNumber = 0;

      // Try to get some information about the callback to report a sane
      // error, but don't try too hard (only deals with scripted functions).
      JS::Rooted<JSObject*> unwrapped(cx,
        js::CheckedUnwrap(mCallback->Callback()));

      if (unwrapped) {
        JSAutoCompartment ac(cx, unwrapped);
        if (JS_ObjectIsFunction(cx, unwrapped)) {
          JS::Rooted<JS::Value> asValue(cx, JS::ObjectValue(*unwrapped));
          JS::Rooted<JSFunction*> func(cx, JS_ValueToFunction(cx, asValue));
          MOZ_ASSERT(func);
          JSScript* script = JS_GetFunctionScript(cx, func);
          if (script) {
            fileName = JS_GetScriptFilename(cx, script);
            lineNumber = JS_GetScriptBaseLineNumber(cx, script);
          }
        }
      }

      // We're back in aValue's compartment here.
      JS::Rooted<JSString*> stack(cx, JS_GetEmptyString(JS_GetRuntime(cx)));
      JS::Rooted<JSString*> fn(cx, JS_NewStringCopyZ(cx, fileName));
      if (!fn) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(cx);
        return;
      }

      JS::Rooted<JSString*> message(cx,
        JS_NewStringCopyZ(cx,
          "then() cannot return same Promise that it resolves."));
      if (!message) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(cx);
        return;
      }

      JS::Rooted<JS::Value> typeError(cx);
      if (!JS::CreateTypeError(cx, stack, fn, lineNumber, 0, nullptr,
                               message, &typeError)) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(cx);
        return;
      }

      mNextPromise->RejectInternal(cx, typeError, Promise::SyncTask);
      return;
    }
  }

  // Otherwise, run resolver's resolve with value and the synchronous flag
  // set.
  Maybe<JSAutoCompartment> ac2;
  EnterCompartment(ac2, cx, value);
  mNextPromise->ResolveInternal(cx, value, Promise::SyncTask);
}
nsresult
WrapperPromiseCallback::Call(JSContext* aCx, JS::Handle<JS::Value> aValue)
{
  JS::ExposeObjectToActiveJS(mGlobal);
  JS::ExposeValueToActiveJS(aValue);

  JSAutoCompartment ac(aCx, mGlobal);
  JS::Rooted<JS::Value> value(aCx, aValue);
  if (!JS_WrapValue(aCx, &value)) {
    NS_WARNING("Failed to wrap value into the right compartment.");
    return NS_ERROR_FAILURE;
  }

  ErrorResult rv;

  // PromiseReactionTask step 6
  JS::Rooted<JS::Value> retValue(aCx);
  JSCompartment* compartment;
  if (mNextPromise) {
    compartment = mNextPromise->Compartment();
  } else {
    MOZ_ASSERT(mNextPromiseObj);
    compartment = js::GetObjectCompartment(mNextPromiseObj);
  }
  mCallback->Call(value, &retValue, rv, "promise callback",
                  CallbackObject::eRethrowExceptions, compartment);

  rv.WouldReportJSException();

  // PromiseReactionTask step 7
  if (rv.Failed()) {
    JS::Rooted<JS::Value> value(aCx);
    { // Scope for JSAutoCompartment
      // Convert the ErrorResult to a JS exception object that we can reject
      // ourselves with. This will be exactly the exception that would get
      // thrown from a binding method whose ErrorResult ended up with
      // whatever is on "rv" right now. Do this in the promise reflector
      // compartment.
      Maybe<JSAutoCompartment> ac;
      if (mNextPromise) {
        ac.emplace(aCx, mNextPromise->GlobalJSObject());
      } else {
        ac.emplace(aCx, mNextPromiseObj);
      }
      DebugOnly<bool> conversionResult = ToJSValue(aCx, rv, &value);
      MOZ_ASSERT(conversionResult);
    }

    if (mNextPromise) {
      mNextPromise->RejectInternal(aCx, value);
    } else {
      JS::Rooted<JS::Value> ignored(aCx);
      ErrorResult rejectRv;
      mRejectFunc->Call(value, &ignored, rejectRv);
      // This reported any JS exceptions; we just have a pointless exception
      // on there now.
      rejectRv.SuppressException();
    }
    return NS_OK;
  }

  // If the return value is the same as the promise itself, throw TypeError.
  if (retValue.isObject()) {
    JS::Rooted<JSObject*> valueObj(aCx, &retValue.toObject());
    valueObj = js::CheckedUnwrap(valueObj);
    JS::Rooted<JSObject*> nextPromiseObj(aCx);
    if (mNextPromise) {
      nextPromiseObj = mNextPromise->GetWrapper();
    } else {
      MOZ_ASSERT(mNextPromiseObj);
      nextPromiseObj = mNextPromiseObj;
    }
    // XXXbz shouldn't this check be over in ResolveInternal anyway?
    if (valueObj == nextPromiseObj) {
      const char* fileName = nullptr;
      uint32_t lineNumber = 0;

      // Try to get some information about the callback to report a sane
      // error, but don't try too hard (only deals with scripted functions).
      JS::Rooted<JSObject*> unwrapped(aCx,
        js::CheckedUnwrap(mCallback->Callback()));

      if (unwrapped) {
        JSAutoCompartment ac(aCx, unwrapped);
        if (JS_ObjectIsFunction(aCx, unwrapped)) {
          JS::Rooted<JS::Value> asValue(aCx, JS::ObjectValue(*unwrapped));
          JS::Rooted<JSFunction*> func(aCx, JS_ValueToFunction(aCx, asValue));
          MOZ_ASSERT(func);
          JSScript* script = JS_GetFunctionScript(aCx, func);
          if (script) {
            fileName = JS_GetScriptFilename(script);
            lineNumber = JS_GetScriptBaseLineNumber(aCx, script);
          }
        }
      }

      // We're back in aValue's compartment here.
      JS::Rooted<JSString*> fn(aCx, JS_NewStringCopyZ(aCx, fileName));
      if (!fn) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return NS_ERROR_OUT_OF_MEMORY;
      }

      JS::Rooted<JSString*> message(aCx,
        JS_NewStringCopyZ(aCx,
          "then() cannot return same Promise that it resolves."));
      if (!message) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return NS_ERROR_OUT_OF_MEMORY;
      }

      JS::Rooted<JS::Value> typeError(aCx);
      if (!JS::CreateError(aCx, JSEXN_TYPEERR, nullptr, fn, lineNumber, 0,
                           nullptr, message, &typeError)) {
        // Out of memory. Promise will stay unresolved.
        JS_ClearPendingException(aCx);
        return NS_ERROR_OUT_OF_MEMORY;
      }

      if (mNextPromise) {
        mNextPromise->RejectInternal(aCx, typeError);
      } else {
        JS::Rooted<JS::Value> ignored(aCx);
        ErrorResult rejectRv;
        mRejectFunc->Call(typeError, &ignored, rejectRv);
        // This reported any JS exceptions; we just have a pointless
        // exception on there now.
        rejectRv.SuppressException();
      }
      return NS_OK;
    }
  }

  // Otherwise, run resolver's resolve with value.
  if (!JS_WrapValue(aCx, &retValue)) {
    NS_WARNING("Failed to wrap value into the right compartment.");
    return NS_ERROR_FAILURE;
  }

  if (mNextPromise) {
    mNextPromise->ResolveInternal(aCx, retValue);
  } else {
    JS::Rooted<JS::Value> ignored(aCx);
    ErrorResult resolveRv;
    mResolveFunc->Call(retValue, &ignored, resolveRv);
    // This reported any JS exceptions; we just have a pointless exception
    // on there now.
    resolveRv.SuppressException();
  }
  return NS_OK;
}