/*
 * new ArrayBuffer(byteLength)
 */
bool
ArrayBufferObject::class_constructor(JSContext* cx, unsigned argc, Value* vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);

    if (!ThrowIfNotConstructing(cx, args, "ArrayBuffer"))
        return false;

    int32_t nbytes = 0;
    if (argc > 0 && !ToInt32(cx, args[0], &nbytes))
        return false;

    if (nbytes < 0) {
        /*
         * We're just not going to support arrays that are bigger than what will fit
         * as an integer value; if someone actually ever complains (validly), then we
         * can fix.
         */
        JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_ARRAY_LENGTH);
        return false;
    }

    RootedObject proto(cx);
    RootedObject newTarget(cx, &args.newTarget().toObject());
    if (!GetPrototypeFromConstructor(cx, newTarget, &proto))
        return false;

    JSObject* bufobj = create(cx, uint32_t(nbytes), proto);
    if (!bufobj)
        return false;

    args.rval().setObject(*bufobj);
    return true;
}
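/*
 * Illustrative sketch, not part of this file: the constructor above funnels
 * the requested length through ToInt32, so a length outside [0, INT32_MAX]
 * either wraps modulo 2^32 (new ArrayBuffer(2**32) silently becomes length 0)
 * or lands negative and hits JSMSG_BAD_ARRAY_LENGTH. ModelToInt32 below is
 * our hypothetical standalone model of ECMAScript ToInt32 for finite doubles,
 * not a SpiderMonkey API, shown only to make the wrapping concrete.
 */
#include <cassert>
#include <cmath>
#include <cstdint>

static int32_t
ModelToInt32(double d)
{
    // ToInt32: truncate toward zero, reduce mod 2^32, reinterpret as signed.
    if (!std::isfinite(d))
        return 0;
    double wrapped = std::fmod(std::trunc(d), 4294967296.0);  // 2^32
    if (wrapped < 0)
        wrapped += 4294967296.0;
    return wrapped >= 2147483648.0 ? int32_t(wrapped - 4294967296.0)
                                   : int32_t(wrapped);
}

static void
ModelToInt32Examples()
{
    assert(ModelToInt32(4294967296.0) == 0);          // 2^32 wraps to 0
    assert(ModelToInt32(2147483648.0) == INT32_MIN);  // 2^31 goes negative
    assert(ModelToInt32(-1.0) == -1);                 // rejected as nbytes < 0
}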
/*
 * Convert |v| to an array index for an array of length |length| per
 * the Typed Array Specification section 7.0, |subarray|. If successful,
 * the output value is in the range [0, length].
 */
bool
js::ToClampedIndex(JSContext* cx, HandleValue v, uint32_t length, uint32_t* out)
{
    int32_t result;
    if (!ToInt32(cx, v, &result))
        return false;
    if (result < 0) {
        result += length;
        if (result < 0)
            result = 0;
    } else if (uint32_t(result) > length) {
        result = length;
    }
    *out = uint32_t(result);
    return true;
}
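/*
 * Illustrative sketch, not part of this file: the clamp above implements the
 * typed-array |subarray| index rule; negative indices count back from the end
 * and everything is pinned to [0, length] (length itself is a valid result).
 * ModelClampIndex is a hypothetical standalone copy of that arithmetic,
 * assuming length <= INT32_MAX as the callers here do.
 */
#include <cassert>
#include <cstdint>

static uint32_t
ModelClampIndex(int32_t index, uint32_t length)
{
    if (index < 0) {
        index += int32_t(length);  // count from the end, e.g. -1 -> length - 1
        if (index < 0)
            index = 0;             // still negative: pin to the front
    } else if (uint32_t(index) > length) {
        index = length;            // past the end: pin to length (inclusive)
    }
    return uint32_t(index);
}

static void
ModelClampIndexExamples()
{
    assert(ModelClampIndex(-2, 10) == 8);    // relative to the end
    assert(ModelClampIndex(-20, 10) == 0);   // clamped at the front
    assert(ModelClampIndex(15, 10) == 10);   // clamped at length itself
}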
static void
setIndexValue(SharedTypedArrayObject& tarray, uint32_t index, double d)
{
    // If the array is an integer array, we only handle up to 32-bit ints
    // from this point on. If we want to handle 64-bit ints, we'll need
    // some changes.

    // Assign based on characteristics of the destination type.
    if (ArrayTypeIsFloatingPoint()) {
        setIndex(tarray, index, NativeType(d));
    } else if (ArrayTypeIsUnsigned()) {
        MOZ_ASSERT(sizeof(NativeType) <= 4);
        uint32_t n = ToUint32(d);
        setIndex(tarray, index, NativeType(n));
    } else if (ArrayTypeID() == Scalar::Uint8Clamped) {
        // The uint8_clamped type has a special rounding converter
        // for doubles.
        setIndex(tarray, index, NativeType(d));
    } else {
        MOZ_ASSERT(sizeof(NativeType) <= 4);
        int32_t n = ToInt32(d);
        setIndex(tarray, index, NativeType(n));
    }
}
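/*
 * Illustrative sketch, not part of this file: of the branches above,
 * uint8_clamped is the odd one out. Instead of the modular ToInt32/ToUint32
 * wrap, it saturates to [0, 255] and rounds ties to even, per the spec's
 * uint8-clamped conversion. ModelClampUint8 is our standalone model of that
 * converter, not the engine's uint8_clamped class itself; it assumes the
 * default FE_TONEAREST rounding mode for std::nearbyint.
 */
#include <cassert>
#include <cmath>
#include <cstdint>

static uint8_t
ModelClampUint8(double d)
{
    if (std::isnan(d) || d <= 0.0)
        return 0;              // NaN and negatives saturate to 0
    if (d >= 255.0)
        return 255;            // large values saturate to 255
    return uint8_t(std::nearbyint(d));  // default mode: ties go to even
}

static void
ModelClampUint8Examples()
{
    assert(ModelClampUint8(-5.0) == 0);     // saturates, unlike int8 wrap
    assert(ModelClampUint8(300.0) == 255);  // saturates, unlike uint8 wrap
    assert(ModelClampUint8(2.5) == 2);      // ties round to even, not up
    assert(ModelClampUint8(3.5) == 4);
}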
/*
 * Experimental implementation of ArrayBuffer.transfer:
 *   https://gist.github.com/andhow/95fb9e49996615764eff
 * which is currently in the early stages of proposal for ES7.
 */
bool
ArrayBufferObject::fun_transfer(JSContext* cx, unsigned argc, Value* vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    HandleValue oldBufferArg = args.get(0);
    HandleValue newByteLengthArg = args.get(1);

    if (!oldBufferArg.isObject()) {
        JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS);
        return false;
    }

    RootedObject oldBufferObj(cx, &oldBufferArg.toObject());
    ESClassValue cls;
    if (!GetBuiltinClass(cx, oldBufferObj, &cls))
        return false;
    if (cls != ESClass_ArrayBuffer) {
        JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS);
        return false;
    }

    // Beware: oldBuffer can point across compartment boundaries. ArrayBuffer
    // contents are not compartment-specific so this is safe.
    Rooted<ArrayBufferObject*> oldBuffer(cx);
    if (oldBufferObj->is<ArrayBufferObject>()) {
        oldBuffer = &oldBufferObj->as<ArrayBufferObject>();
    } else {
        JSObject* unwrapped = CheckedUnwrap(oldBufferObj);
        if (!unwrapped || !unwrapped->is<ArrayBufferObject>()) {
            JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS);
            return false;
        }
        oldBuffer = &unwrapped->as<ArrayBufferObject>();
    }

    size_t oldByteLength = oldBuffer->byteLength();
    size_t newByteLength;
    if (newByteLengthArg.isUndefined()) {
        newByteLength = oldByteLength;
    } else {
        int32_t i32;
        if (!ToInt32(cx, newByteLengthArg, &i32))
            return false;
        if (i32 < 0) {
            JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_BAD_ARRAY_LENGTH);
            return false;
        }
        newByteLength = size_t(i32);
    }

    if (oldBuffer->isNeutered()) {
        JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_DETACHED);
        return false;
    }

    UniquePtr<uint8_t, JS::FreePolicy> newData;
    if (!newByteLength) {
        if (!ArrayBufferObject::neuter(cx, oldBuffer, oldBuffer->contents()))
            return false;
    } else {
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
        // With a 4gb mapped asm.js buffer, we can simply enable/disable access
        // to the delta as long as the requested length is page-sized.
        if (oldBuffer->isAsmJSMapped() && (newByteLength % AsmJSPageSize) == 0)
            return TransferAsmJSMappedBuffer(cx, args, oldBuffer, newByteLength);
#endif

        // Since we try to realloc below, only allow stealing malloc'd buffers.
        // If !hasMallocedContents, stealContents will malloc a copy which we
        // can then realloc.
        bool steal = oldBuffer->hasMallocedContents();
        auto stolenContents = ArrayBufferObject::stealContents(cx, oldBuffer, steal);
        if (!stolenContents)
            return false;
        UniquePtr<uint8_t, JS::FreePolicy> oldData(stolenContents.data());

        if (newByteLength > oldByteLength) {
            // In theory, realloc+memset(0) can be optimized to avoid touching
            // any pages (by using OS page mapping tricks). However, in
            // practice, we don't seem to get this optimization in Firefox with
            // jemalloc so calloc+memcpy are faster.
            newData.reset(cx->runtime()->pod_callocCanGC<uint8_t>(newByteLength));
            if (newData) {
                memcpy(newData.get(), oldData.get(), oldByteLength);
            } else {
                // Try realloc before giving up since it might be able to
                // succeed by resizing oldData in-place.
                newData.reset(cx->pod_realloc(oldData.get(), oldByteLength, newByteLength));
                if (!newData)
                    return false;
                oldData.release();
                memset(newData.get() + oldByteLength, 0, newByteLength - oldByteLength);
            }
        } else if (newByteLength < oldByteLength) {
            newData.reset(cx->pod_realloc(oldData.get(), oldByteLength, newByteLength));
            if (!newData)
                return false;
            oldData.release();
        } else {
            newData = Move(oldData);
        }
    }

    RootedObject newBuffer(cx, JS_NewArrayBufferWithContents(cx, newByteLength, newData.get()));
    if (!newBuffer)
        return false;
    newData.release();

    args.rval().setObject(*newBuffer);
    return true;
}
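/*
 * Illustrative sketch, not part of this file: the non-zero resize logic of
 * fun_transfer in miniature, using plain malloc-family calls instead of the
 * GC-aware pod_callocCanGC/pod_realloc. Growing prefers calloc+memcpy (the
 * comment above explains why that beats realloc+memset under jemalloc), with
 * realloc as a fallback that may extend in place; shrinking just reallocs;
 * equal sizes hand the pointer over untouched. ModelTransferResize is a
 * hypothetical name; it assumes newLen > 0 (the zero case above neuters the
 * buffer instead) and returns nullptr on OOM, leaving oldData still owned by
 * the caller.
 */
#include <cstdint>
#include <cstdlib>
#include <cstring>

static uint8_t*
ModelTransferResize(uint8_t* oldData, size_t oldLen, size_t newLen)
{
    if (newLen > oldLen) {
        // Grow: calloc hands back zeroed pages; copy the old prefix in.
        if (uint8_t* fresh = static_cast<uint8_t*>(calloc(newLen, 1))) {
            memcpy(fresh, oldData, oldLen);
            free(oldData);
            return fresh;
        }
        // Fallback: realloc may resize in place; zero the new tail ourselves.
        uint8_t* grown = static_cast<uint8_t*>(realloc(oldData, newLen));
        if (!grown)
            return nullptr;    // oldData still owns the original allocation
        memset(grown + oldLen, 0, newLen - oldLen);
        return grown;
    }
    if (newLen < oldLen)
        return static_cast<uint8_t*>(realloc(oldData, newLen));  // shrink
    return oldData;            // same size: transfer ownership as-is
}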