author    Lorry Tar Creator <lorry-tar-importer@baserock.org>    2013-12-11 22:24:18 +0000
committer <>    2014-07-24 09:30:59 +0000
commit    59e2936f588aa945e8dcd6c737523c299067e9d0 (patch)
tree      97e74980cc54baa19de5faa11f5a50a0121a48ea /js/public
Imported from /home/lorry/working-area/delta_mozilla_mozjs24/mozjs-24.2.0.tar.bz2. (HEAD, mozjs-24.2.0, master)
Diffstat (limited to 'js/public')
-rw-r--r--  js/public/Anchor.h             162
-rw-r--r--  js/public/CallArgs.h           403
-rw-r--r--  js/public/CharacterEncoding.h  155
-rw-r--r--  js/public/Date.h                35
-rw-r--r--  js/public/GCAPI.h              320
-rw-r--r--  js/public/HashTable.h         1461
-rw-r--r--  js/public/HeapAPI.h            145
-rw-r--r--  js/public/LegacyIntTypes.h      60
-rw-r--r--  js/public/MemoryMetrics.h      460
-rw-r--r--  js/public/PropertyKey.h         98
-rw-r--r--  js/public/RequiredDefines.h     23
-rw-r--r--  js/public/RootingAPI.h         957
-rw-r--r--  js/public/TemplateLib.h        117
-rw-r--r--  js/public/Utility.h            871
-rw-r--r--  js/public/Value.h             1844
-rw-r--r--  js/public/Vector.h            1107
16 files changed, 8218 insertions, 0 deletions
diff --git a/js/public/Anchor.h b/js/public/Anchor.h
new file mode 100644
index 0000000..0d458e6
--- /dev/null
+++ b/js/public/Anchor.h
@@ -0,0 +1,162 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS::Anchor implementation. */
+
+#ifndef js_Anchor_h
+#define js_Anchor_h
+
+#include "mozilla/Attributes.h"
+
+class JSFunction;
+class JSObject;
+class JSScript;
+class JSString;
+
+namespace JS { class Value; }
+
+namespace JS {
+
+/*
+ * Protecting non-Value, non-JSObject *, non-JSString * values from collection
+ *
+ * Most of the time, the garbage collector's conservative stack scanner works
+ * behind the scenes, finding all live values and protecting them from being
+ * collected. However, when JSAPI client code obtains a pointer to data the
+ * scanner does not know about, owned by an object the scanner does know about,
+ * Care Must Be Taken.
+ *
+ * The scanner recognizes only a select set of types: pointers to JSObjects and
+ * similar things (JSFunctions, and so on), pointers to JSStrings, and Values.
+ * So while the scanner finds all live |JSString| pointers, it does not notice
+ * |jschar| pointers.
+ *
+ * So suppose we have:
+ *
+ * void f(JSString *str) {
+ * const jschar *ch = JS_GetStringCharsZ(str);
+ * ... do stuff with ch, but no uses of str ...;
+ * }
+ *
+ * After the call to |JS_GetStringCharsZ|, there are no further uses of
+ * |str|, which means that the compiler is within its rights to not store
+ * it anywhere. But because the stack scanner will not notice |ch|, there
+ * is no longer any live value in this frame that would keep the string
+ * alive. If |str| is the last reference to that |JSString|, and the
+ * collector runs while we are using |ch|, the string's array of |jschar|s
+ * may be freed out from under us.
+ *
+ * Note that there is only an issue when 1) we extract a thing X the scanner
+ * doesn't recognize from 2) a thing Y the scanner does recognize, and 3) if Y
+ * gets garbage-collected, then X gets freed. If we have code like this:
+ *
+ * void g(JSObject *obj) {
+ * JS::Value x;
+ * JS_GetProperty(obj, "x", &x);
+ * ... do stuff with x ...
+ * }
+ *
+ * there's no problem, because the value we've extracted, x, is a Value, a
+ * type that the conservative scanner recognizes.
+ *
+ * Conservative GC frees us from the obligation to explicitly root the types it
+ * knows about, but when we work with derived values like |ch|, we must root
+ * their owners, as the derived value alone won't keep them alive.
+ *
+ * A JS::Anchor is a kind of GC root that allows us to keep the owners of
+ * derived values like |ch| alive throughout the Anchor's lifetime. We could
+ * fix the above code as follows:
+ *
+ * void f(JSString *str) {
+ * JS::Anchor<JSString *> a_str(str);
+ * const jschar *ch = JS_GetStringCharsZ(str);
+ * ... do stuff with ch, but no uses of str ...;
+ * }
+ *
+ * This simply ensures that |str| will be live until |a_str| goes out of scope.
+ * As long as we don't retain a pointer to the string's characters for longer
+ * than that, we have avoided all garbage collection hazards.
+ */
+template<typename T> class AnchorPermitted;
+template<> class AnchorPermitted<JSObject *> { };
+template<> class AnchorPermitted<const JSObject *> { };
+template<> class AnchorPermitted<JSFunction *> { };
+template<> class AnchorPermitted<const JSFunction *> { };
+template<> class AnchorPermitted<JSString *> { };
+template<> class AnchorPermitted<const JSString *> { };
+template<> class AnchorPermitted<Value> { };
+template<> class AnchorPermitted<const JSScript *> { };
+template<> class AnchorPermitted<JSScript *> { };
+
+template<typename T>
+class Anchor : AnchorPermitted<T>
+{
+ public:
+ Anchor() { }
+ explicit Anchor(T t) { hold = t; }
+ inline ~Anchor();
+ T &get() { return hold; }
+ const T &get() const { return hold; }
+ void set(const T &t) { hold = t; }
+ void operator=(const T &t) { hold = t; }
+ void clear() { hold = 0; }
+
+ private:
+ T hold;
+ Anchor(const Anchor &other) MOZ_DELETE;
+ void operator=(const Anchor &other) MOZ_DELETE;
+};
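+
+/*
+ * Example (editorial sketch, mirroring the fix shown above): anchoring a
+ * Value whose string payload is in use. Anchor<Value> keeps the Value's
+ * referent alive, so the string's characters stay valid.
+ *
+ *     void g(JS::Value v) {
+ *         JS::Anchor<JS::Value> a_v(v);
+ *         const jschar *ch = JS_GetStringCharsZ(v.toString());
+ *         ... use ch; the string stays alive until a_v goes out of scope ...
+ *     }
+ */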
+
+template<typename T>
+inline Anchor<T>::~Anchor()
+{
+#ifdef __GNUC__
+ /*
+ * No code is generated for this. But because this is marked 'volatile', G++ will
+ * assume it has important side-effects, and won't delete it. (G++ never looks at
+ * the actual text and notices it's empty.) And because we have passed |hold| to
+ * it, GCC will keep |hold| alive until this point.
+ *
+ * The "memory" clobber operand ensures that G++ will not move prior memory
+ * accesses after the asm --- it's a barrier. Unfortunately, it also means that
+ * G++ will assume that all memory has changed after the asm, as it would for a
+ * call to an unknown function. I don't know of a way to avoid that consequence.
+ */
+ asm volatile("":: "g" (hold) : "memory");
+#else
+ /*
+ * An adequate portable substitute, for non-structure types.
+ *
+ * The compiler promises that, by the end of an expression statement, the
+ * last-stored value to a volatile object is the same as it would be in an
+ * unoptimized, direct implementation (the "abstract machine" whose behavior the
+ * language spec describes). However, the compiler is still free to reorder
+ * non-volatile accesses across this store --- which is what we must prevent. So
+ * assigning the held value to a volatile variable, as we do here, is not enough.
+ *
+ * In our case, however, garbage collection only occurs at function calls, so it
+ * is sufficient to ensure that the destructor's store isn't moved earlier across
+ * any function calls that could collect. It is hard to imagine the compiler
+ * analyzing the program so thoroughly that it could prove that such motion was
+ * safe. In practice, compilers treat calls to the collector as opaque operations
+ * --- in particular, as operations which could access volatile variables, across
+ * which this destructor must not be moved.
+ *
+ * ("Objection, your honor! *Alleged* killer whale!")
+ *
+ * The disadvantage of this approach is that it does generate code for the store.
+ * We do need to use Anchors in some cases where cycles are tight.
+ *
+ * Note that there is an Anchor<Value>::~Anchor() specialization in Value.h.
+ */
+ volatile T sink;
+ sink = hold;
+#endif /* defined(__GNUC__) */
+}
+
+} // namespace JS
+
+#endif /* js_Anchor_h */
diff --git a/js/public/CallArgs.h b/js/public/CallArgs.h
new file mode 100644
index 0000000..986a3f0
--- /dev/null
+++ b/js/public/CallArgs.h
@@ -0,0 +1,403 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Helper classes encapsulating access to the callee, |this| value, arguments,
+ * and argument count for a function call.
+ *
+ * The intent of JS::CallArgs and JS::CallReceiver is that they be used to
+ * encapsulate access to the un-abstracted |unsigned argc, Value *vp| arguments
+ * to a function. It's possible (albeit deprecated) to manually index into
+ * |vp| to access the callee, |this|, and arguments of a function, and to set
+ * its return value. It's also possible to use the supported API of JS_CALLEE,
+ * JS_THIS, JS_ARGV, JS_RVAL and JS_SET_RVAL to the same ends. But neither API
+ * has the error-handling or moving-GC correctness of CallArgs or CallReceiver.
+ * New code should use CallArgs and CallReceiver instead whenever possible.
+ *
+ * The eventual plan is to change JSNative to take |const CallArgs&| directly,
+ * for automatic assertion of correct use and to make calling functions more
+ * efficient. Embedders should start internally switching away from using
+ * |argc| and |vp| directly, except to create a |CallArgs|. Then, when an
+ * eventual release making that change occurs, porting efforts will require
+ * changing methods' signatures but won't require invasive changes to the
+ * methods' implementations, potentially under time pressure.
+ */
+
+#ifndef js_CallArgs_h
+#define js_CallArgs_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/TypeTraits.h"
+
+#include "jstypes.h"
+
+#include "js/RootingAPI.h"
+#include "js/Value.h"
+
+struct JSContext;
+class JSObject;
+
+/* Typedef for native functions called by the JS VM. */
+typedef JSBool
+(* JSNative)(JSContext *cx, unsigned argc, JS::Value *vp);
+
+/*
+ * Compute |this| for the |vp| inside a JSNative, either boxing primitives or
+ * replacing with the global object as necessary.
+ *
+ * This method will go away at some point: instead use |args.thisv()|. If the
+ * value is an object, no further work is required. If that value is |null| or
+ * |undefined|, use |JS_GetGlobalForObject| to compute the global object. If
+ * the value is some other primitive, use |JS_ValueToObject| to box it.
+ */
+extern JS_PUBLIC_API(JS::Value)
+JS_ComputeThis(JSContext *cx, JS::Value *vp);
+
+namespace JS {
+
+extern JS_PUBLIC_DATA(const HandleValue) UndefinedHandleValue;
+
+/*
+ * JS::CallReceiver encapsulates access to the callee, |this|, and eventual
+ * return value for a function call. The principal way to create a
+ * CallReceiver is using JS::CallReceiverFromVp:
+ *
+ * static JSBool
+ * FunctionReturningThis(JSContext *cx, unsigned argc, JS::Value *vp)
+ * {
+ * JS::CallReceiver rec = JS::CallReceiverFromVp(vp);
+ *
+ * // Access to the callee must occur before accessing/setting
+ * // the return value.
+ * JSObject &callee = rec.callee();
+ * rec.rval().set(JS::ObjectValue(callee));
+ *
+ * // callee() and calleev() will now assert.
+ *
+ * // It's always fine to access thisv().
+ * HandleValue thisv = rec.thisv();
+ * rec.rval().set(thisv);
+ *
+ * // As the return value was last set to |this|, returns |this|.
+ * return true;
+ * }
+ *
+ * A note on JS_ComputeThis and JS_THIS_OBJECT: these methods currently aren't
+ * part of the CallReceiver interface. We will likely add them at some point.
+ * Until then, you should probably continue using |vp| directly for these two
+ * cases.
+ *
+ * CallReceiver is exposed publicly and used internally. Not all parts of its
+ * public interface are meant to be used by embedders! See inline comments
+ * for details.
+ */
+
+namespace detail {
+
+enum UsedRval { IncludeUsedRval, NoUsedRval };
+
+template<UsedRval WantUsedRval>
+class MOZ_STACK_CLASS UsedRvalBase;
+
+template<>
+class MOZ_STACK_CLASS UsedRvalBase<IncludeUsedRval>
+{
+ protected:
+ mutable bool usedRval_;
+ void setUsedRval() const { usedRval_ = true; }
+ void clearUsedRval() const { usedRval_ = false; }
+};
+
+template<>
+class MOZ_STACK_CLASS UsedRvalBase<NoUsedRval>
+{
+ protected:
+ void setUsedRval() const {}
+ void clearUsedRval() const {}
+};
+
+template<UsedRval WantUsedRval>
+class MOZ_STACK_CLASS CallReceiverBase : public UsedRvalBase<
+#ifdef JS_DEBUG
+ WantUsedRval
+#else
+ NoUsedRval
+#endif
+ >
+{
+ protected:
+ Value *argv_;
+
+ public:
+ /*
+ * Returns the function being called, as an object. Must not be called
+ * after rval() has been used!
+ */
+ JSObject &callee() const {
+ MOZ_ASSERT(!this->usedRval_);
+ return argv_[-2].toObject();
+ }
+
+ /*
+ * Returns the function being called, as a value. Must not be called after
+ * rval() has been used!
+ */
+ HandleValue calleev() const {
+ MOZ_ASSERT(!this->usedRval_);
+ return HandleValue::fromMarkedLocation(&argv_[-2]);
+ }
+
+ /*
+ * Returns the |this| value passed to the function. This method must not
+ * be called when the function is being called as a constructor via |new|.
+ * The value may or may not be an object: it is the individual function's
+ * responsibility to box the value if needed.
+ */
+ HandleValue thisv() const {
+ // Some internal code uses thisv() in constructing cases, so don't do
+ // this yet.
+ // MOZ_ASSERT(!argv_[-1].isMagic(JS_IS_CONSTRUCTING));
+ return HandleValue::fromMarkedLocation(&argv_[-1]);
+ }
+
+ Value computeThis(JSContext *cx) const {
+ if (thisv().isObject())
+ return thisv();
+
+ return JS_ComputeThis(cx, base());
+ }
+
+ /*
+ * Returns the currently-set return value. The initial contents of this
+ * value are unspecified. Once this method has been called, callee() and
+ * calleev() can no longer be used. (If you're compiling against a debug
+ * build of SpiderMonkey, these methods will assert to aid debugging.)
+ *
+ * If the method you're implementing succeeds by returning true, you *must*
+ * set this. (SpiderMonkey doesn't currently assert this, but it will do
+ * so eventually.) You don't need to use or change this if your method
+ * fails.
+ */
+ MutableHandleValue rval() const {
+ this->setUsedRval();
+ return MutableHandleValue::fromMarkedLocation(&argv_[-2]);
+ }
+
+ public:
+ // These methods are only intended for internal use. Embedders shouldn't
+ // use them!
+
+ Value *base() const { return argv_ - 2; }
+
+ Value *spAfterCall() const {
+ this->setUsedRval();
+ return argv_ - 1;
+ }
+
+ public:
+ // These methods are publicly exposed, but they are *not* to be used when
+ // implementing a JSNative method and encapsulating access to |vp| within
+ // it. You probably don't want to use these!
+
+ void setCallee(Value aCalleev) const {
+ this->clearUsedRval();
+ argv_[-2] = aCalleev;
+ }
+
+ void setThis(Value aThisv) const {
+ argv_[-1] = aThisv;
+ }
+
+ MutableHandleValue mutableThisv() const {
+ return MutableHandleValue::fromMarkedLocation(&argv_[-1]);
+ }
+};
+
+} // namespace detail
+
+class MOZ_STACK_CLASS CallReceiver : public detail::CallReceiverBase<detail::IncludeUsedRval>
+{
+ private:
+ friend CallReceiver CallReceiverFromVp(Value *vp);
+ friend CallReceiver CallReceiverFromArgv(Value *argv);
+};
+
+MOZ_ALWAYS_INLINE CallReceiver
+CallReceiverFromArgv(Value *argv)
+{
+ CallReceiver receiver;
+ receiver.clearUsedRval();
+ receiver.argv_ = argv;
+ return receiver;
+}
+
+MOZ_ALWAYS_INLINE CallReceiver
+CallReceiverFromVp(Value *vp)
+{
+ return CallReceiverFromArgv(vp + 2);
+}
+
+/*
+ * JS::CallArgs encapsulates everything JS::CallReceiver does, plus access to
+ * the function call's arguments. The principal way to create a CallArgs is
+ * like so, using JS::CallArgsFromVp:
+ *
+ * static JSBool
+ * FunctionReturningArgcTimesArg0(JSContext *cx, unsigned argc, JS::Value *vp)
+ * {
+ * JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+ *
+ * // Guard against no arguments or a non-numeric arg0.
+ * if (args.length() == 0 || !args[0].isNumber()) {
+ * args.rval().setInt32(0);
+ * return true;
+ * }
+ *
+ * args.rval().set(JS::NumberValue(args.length() * args[0].toNumber()));
+ * return true;
+ * }
+ *
+ * CallArgs is exposed publicly and used internally. Not all parts of its
+ * public interface are meant to be used by embedders! See inline comments
+ * for details.
+ */
+namespace detail {
+
+template<UsedRval WantUsedRval>
+class MOZ_STACK_CLASS CallArgsBase :
+ public mozilla::Conditional<WantUsedRval == detail::IncludeUsedRval,
+ CallReceiver,
+ CallReceiverBase<NoUsedRval> >::Type
+{
+ protected:
+ unsigned argc_;
+
+ public:
+ /* Returns the number of arguments. */
+ unsigned length() const { return argc_; }
+
+ /* Returns the i-th zero-indexed argument. */
+ Value &operator[](unsigned i) const {
+ MOZ_ASSERT(i < argc_);
+ return this->argv_[i];
+ }
+
+ /* Returns a mutable handle for the i-th zero-indexed argument. */
+ MutableHandleValue handleAt(unsigned i) const {
+ MOZ_ASSERT(i < argc_);
+ return MutableHandleValue::fromMarkedLocation(&this->argv_[i]);
+ }
+
+ /*
+ * Returns the i-th zero-indexed argument, or |undefined| if there's no
+ * such argument.
+ */
+ Value get(unsigned i) const {
+ return i < length() ? this->argv_[i] : UndefinedValue();
+ }
+
+ /*
+ * Returns the i-th zero-indexed argument as a handle, or |undefined| if
+ * there's no such argument.
+ */
+ HandleValue handleOrUndefinedAt(unsigned i) const {
+ return i < length()
+ ? HandleValue::fromMarkedLocation(&this->argv_[i])
+ : UndefinedHandleValue;
+ }
+
+ /*
+ * Returns true if the i-th zero-indexed argument is present and is not
+ * |undefined|.
+ */
+ bool hasDefined(unsigned i) const {
+ return i < argc_ && !this->argv_[i].isUndefined();
+ }
+
+ public:
+ // These methods are publicly exposed, but we're less sure of the interface
+ // here than we'd like (because they're hackish and drop assertions). Try
+ // to avoid using these if you can.
+
+ Value *array() const { return this->argv_; }
+ Value *end() const { return this->argv_ + argc_; }
+};
+
+} // namespace detail
+
+class MOZ_STACK_CLASS CallArgs : public detail::CallArgsBase<detail::IncludeUsedRval>
+{
+ private:
+ friend CallArgs CallArgsFromVp(unsigned argc, Value *vp);
+ friend CallArgs CallArgsFromSp(unsigned argc, Value *sp);
+
+ static CallArgs create(unsigned argc, Value *argv) {
+ CallArgs args;
+ args.clearUsedRval();
+ args.argv_ = argv;
+ args.argc_ = argc;
+ return args;
+ }
+
+};
+
+MOZ_ALWAYS_INLINE CallArgs
+CallArgsFromVp(unsigned argc, Value *vp)
+{
+ return CallArgs::create(argc, vp + 2);
+}
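+
+/*
+ * Example (editorial sketch): a JSNative with an optional argument, using
+ * hasDefined() to avoid indexing past length().
+ *
+ *     static JSBool
+ *     FunctionWithOptionalArg(JSContext *cx, unsigned argc, JS::Value *vp)
+ *     {
+ *         JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+ *         // args[0] asserts in debug builds if no argument was passed;
+ *         // hasDefined(0) short-circuits that case.
+ *         bool loud = args.hasDefined(0) && args[0].isTrue();
+ *         args.rval().set(JS::BooleanValue(loud));
+ *         return true;
+ *     }
+ */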
+
+// This method is only intended for internal use in SpiderMonkey. We may
+// eventually move it to an internal header. Embedders should use
+// JS::CallArgsFromVp!
+MOZ_ALWAYS_INLINE CallArgs
+CallArgsFromSp(unsigned argc, Value *sp)
+{
+ return CallArgs::create(argc, sp - argc);
+}
+
+} // namespace JS
+
+/*
+ * Macros to hide interpreter stack layout details from a JSNative using its
+ * JS::Value *vp parameter. DO NOT USE THESE! Instead use JS::CallArgs and
+ * friends, above. These macros will be removed when we change JSNative to
+ * take a const JS::CallArgs&.
+ */
+
+#define JS_CALLEE(cx,vp) ((vp)[0])
+#define JS_THIS_OBJECT(cx,vp) (JSVAL_TO_OBJECT(JS_THIS(cx,vp)))
+#define JS_ARGV(cx,vp) ((vp) + 2)
+#define JS_RVAL(cx,vp) (*(vp))
+#define JS_SET_RVAL(cx,vp,v) (*(vp) = (v))
+
+/*
+ * Note: if this method returns a null value, an error has occurred and must be
+ * propagated or caught.
+ */
+MOZ_ALWAYS_INLINE JS::Value
+JS_THIS(JSContext *cx, JS::Value *vp)
+{
+ return JSVAL_IS_PRIMITIVE(vp[1]) ? JS_ComputeThis(cx, vp) : vp[1];
+}
+
+/*
+ * |this| is passed to functions in ES5 without change. Functions themselves
+ * do any post-processing they desire to box |this|, compute the global object,
+ * &c. This macro retrieves a function's unboxed |this| value.
+ *
+ * This macro must not be used in conjunction with JS_THIS or JS_THIS_OBJECT,
+ * or vice versa. Either use the provided this value with this macro, or
+ * compute the boxed |this| value using those. JS_THIS_VALUE must not be used
+ * if the function is being called as a constructor.
+ *
+ * But: DO NOT USE THIS! Instead use JS::CallArgs::thisv(), above.
+ */
+#define JS_THIS_VALUE(cx,vp) ((vp)[1])
+
+#endif /* js_CallArgs_h */
diff --git a/js/public/CharacterEncoding.h b/js/public/CharacterEncoding.h
new file mode 100644
index 0000000..7065a4b
--- /dev/null
+++ b/js/public/CharacterEncoding.h
@@ -0,0 +1,155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_CharacterEncoding_h
+#define js_CharacterEncoding_h
+
+#include "mozilla/Range.h"
+
+#include "js/Utility.h"
+
+#include "jspubtd.h"
+
+namespace JS {
+
+/*
+ * By default, all C/C++ 1-byte-per-character strings passed into the JSAPI
+ * are treated as ISO/IEC 8859-1, also known as Latin-1. That is, each
+ * byte is widened to a 2-byte character, and there is no way to pass in a
+ * string containing characters beyond U+00FF.
+ */
+class Latin1Chars : public mozilla::Range<unsigned char>
+{
+ typedef mozilla::Range<unsigned char> Base;
+
+ public:
+ Latin1Chars() : Base() {}
+ Latin1Chars(char *aBytes, size_t aLength) : Base(reinterpret_cast<unsigned char *>(aBytes), aLength) {}
+ Latin1Chars(const char *aBytes, size_t aLength)
+ : Base(reinterpret_cast<unsigned char *>(const_cast<char *>(aBytes)), aLength)
+ {}
+};
+
+/*
+ * A Latin1Chars, but with \0 termination for C compatibility.
+ */
+class Latin1CharsZ : public mozilla::RangedPtr<unsigned char>
+{
+ typedef mozilla::RangedPtr<unsigned char> Base;
+
+ public:
+ Latin1CharsZ() : Base(NULL, 0) {}
+
+ Latin1CharsZ(char *aBytes, size_t aLength)
+ : Base(reinterpret_cast<unsigned char *>(aBytes), aLength)
+ {
+ JS_ASSERT(aBytes[aLength] == '\0');
+ }
+
+ Latin1CharsZ(unsigned char *aBytes, size_t aLength)
+ : Base(aBytes, aLength)
+ {
+ JS_ASSERT(aBytes[aLength] == '\0');
+ }
+
+ char *c_str() { return reinterpret_cast<char *>(get()); }
+};
+
+/*
+ * SpiderMonkey also deals directly with UTF-8 encoded text in some places.
+ */
+class UTF8CharsZ : public mozilla::RangedPtr<unsigned char>
+{
+ typedef mozilla::RangedPtr<unsigned char> Base;
+
+ public:
+ UTF8CharsZ() : Base(NULL, 0) {}
+
+ UTF8CharsZ(char *aBytes, size_t aLength)
+ : Base(reinterpret_cast<unsigned char *>(aBytes), aLength)
+ {
+ JS_ASSERT(aBytes[aLength] == '\0');
+ }
+
+ UTF8CharsZ(unsigned char *aBytes, size_t aLength)
+ : Base(aBytes, aLength)
+ {
+ JS_ASSERT(aBytes[aLength] == '\0');
+ }
+
+ char *c_str() { return reinterpret_cast<char *>(get()); }
+};
+
+/*
+ * SpiderMonkey uses a 2-byte character representation: it is a
+ * 2-byte-at-a-time view of a UTF-16 byte stream. This is similar to UCS-2,
+ * but unlike UCS-2, we do not strip UTF-16 extension bytes. This allows a
+ * sufficiently dedicated JavaScript program to be fully Unicode-aware by
+ * manually interpreting UTF-16 extension characters embedded in the JS
+ * string.
+ */
+class TwoByteChars : public mozilla::Range<jschar>
+{
+ typedef mozilla::Range<jschar> Base;
+
+ public:
+ TwoByteChars() : Base() {}
+ TwoByteChars(jschar *aChars, size_t aLength) : Base(aChars, aLength) {}
+ TwoByteChars(const jschar *aChars, size_t aLength) : Base(const_cast<jschar *>(aChars), aLength) {}
+};
+
+/*
+ * A non-convertible variant of TwoByteChars that does not refer to characters
+ * inlined inside a JSShortString or a JSInlineString. StableTwoByteChars are
+ * thus safe to hold across a GC.
+ */
+class StableTwoByteChars : public mozilla::Range<jschar>
+{
+ typedef mozilla::Range<jschar> Base;
+
+ public:
+ StableTwoByteChars() : Base() {}
+ StableTwoByteChars(jschar *aChars, size_t aLength) : Base(aChars, aLength) {}
+ StableTwoByteChars(const jschar *aChars, size_t aLength) : Base(const_cast<jschar *>(aChars), aLength) {}
+};
+
+/*
+ * A TwoByteChars, but \0 terminated for compatibility with JSFlatString.
+ */
+class TwoByteCharsZ : public mozilla::RangedPtr<jschar>
+{
+ typedef mozilla::RangedPtr<jschar> Base;
+
+ public:
+ TwoByteCharsZ(jschar *chars, size_t length)
+ : Base(chars, length)
+ {
+        JS_ASSERT(chars[length] == '\0');
+ }
+};
+
+/*
+ * Convert a 2-byte character sequence to "ISO-Latin-1". This works by
+ * truncating each 2-byte character in the sequence to one byte. If the source
+ * contains any UTF-16 extension characters, then this may give invalid Latin1
+ * output. The returned string is zero terminated. The returned string or the
+ * returned string's |start()| must be freed with JS_free or js_free,
+ * respectively. If allocation fails, an OOM error will be set and the method
+ * will return a NULL chars (which can be tested for with the ! operator).
+ * This method cannot trigger GC.
+ */
+extern Latin1CharsZ
+LossyTwoByteCharsToNewLatin1CharsZ(JSContext *cx, TwoByteChars tbchars);
+
+extern UTF8CharsZ
+TwoByteCharsToNewUTF8CharsZ(JSContext *cx, TwoByteChars tbchars);
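+
+/*
+ * Example (editorial sketch): converting |len| jschars at |chars| to a
+ * NUL-terminated Latin-1 copy and freeing it afterwards.
+ *
+ *     JS::TwoByteChars tbchars(chars, len);
+ *     JS::Latin1CharsZ latin1 = JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, tbchars);
+ *     if (!latin1)
+ *         return false;          // OOM error has already been set
+ *     ... use latin1.c_str() ...
+ *     JS_free(latin1);
+ */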
+
+} // namespace JS
+
+inline void JS_free(JS::Latin1CharsZ &ptr) { js_free((void*)ptr.get()); }
+inline void JS_free(JS::UTF8CharsZ &ptr) { js_free((void*)ptr.get()); }
+
+#endif /* js_CharacterEncoding_h */
diff --git a/js/public/Date.h b/js/public/Date.h
new file mode 100644
index 0000000..6199f9e
--- /dev/null
+++ b/js/public/Date.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Date_h
+#define js_Date_h
+
+#include "jstypes.h"
+
+namespace JS {
+
+// Year is a year, month is 0-11, day is 1-based. The return value is
+// a number of milliseconds since the epoch. Can return NaN.
+JS_PUBLIC_API(double)
+MakeDate(double year, unsigned month, unsigned day);
+
+// Takes an integer number of milliseconds since the epoch and returns the
+// year. Can return NaN, and will do so if NaN is passed in.
+JS_PUBLIC_API(double)
+YearFromTime(double time);
+
+// Takes an integer number of milliseconds since the epoch and returns the
+// month (0-11). Can return NaN, and will do so if NaN is passed in.
+JS_PUBLIC_API(double)
+MonthFromTime(double time);
+
+// Takes an integer number of milliseconds since the epoch and returns the
+// day (1-based). Can return NaN, and will do so if NaN is passed in.
+JS_PUBLIC_API(double)
+DayFromTime(double time);
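+
+// Example (editorial sketch): the accessors invert MakeDate for valid inputs;
+// months are 0-based throughout.
+//
+//     double t = JS::MakeDate(2004, 5, 15);     // June 15, 2004
+//     JS_ASSERT(JS::YearFromTime(t) == 2004);
+//     JS_ASSERT(JS::MonthFromTime(t) == 5);
+//     JS_ASSERT(JS::DayFromTime(t) == 15);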
+
+} // namespace JS
+
+#endif /* js_Date_h */
diff --git a/js/public/GCAPI.h b/js/public/GCAPI.h
new file mode 100644
index 0000000..825efc8
--- /dev/null
+++ b/js/public/GCAPI.h
@@ -0,0 +1,320 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_GCAPI_h
+#define js_GCAPI_h
+
+#include "HeapAPI.h"
+#include "jsfriendapi.h"
+
+namespace JS {
+
+#define GCREASONS(D) \
+ /* Reasons internal to the JS engine */ \
+ D(API) \
+ D(MAYBEGC) \
+ D(LAST_CONTEXT) \
+ D(DESTROY_CONTEXT) \
+ D(LAST_DITCH) \
+ D(TOO_MUCH_MALLOC) \
+ D(ALLOC_TRIGGER) \
+ D(DEBUG_GC) \
+ D(DEBUG_MODE_GC) \
+ D(TRANSPLANT) \
+ D(RESET) \
+ D(OUT_OF_NURSERY) \
+ D(EVICT_NURSERY) \
+ D(FULL_STORE_BUFFER) \
+ \
+ /* These are reserved for future use. */ \
+ D(RESERVED0) \
+ D(RESERVED1) \
+ D(RESERVED2) \
+ D(RESERVED3) \
+ D(RESERVED4) \
+ D(RESERVED5) \
+ D(RESERVED6) \
+ D(RESERVED7) \
+ D(RESERVED8) \
+ D(RESERVED9) \
+ D(RESERVED10) \
+ D(RESERVED11) \
+ D(RESERVED12) \
+ D(RESERVED13) \
+ D(RESERVED14) \
+ D(RESERVED15) \
+ D(RESERVED16) \
+ D(RESERVED17) \
+ D(RESERVED18) \
+ D(RESERVED19) \
+ \
+ /* Reasons from Firefox */ \
+ D(DOM_WINDOW_UTILS) \
+ D(COMPONENT_UTILS) \
+ D(MEM_PRESSURE) \
+ D(CC_WAITING) \
+ D(CC_FORCED) \
+ D(LOAD_END) \
+ D(POST_COMPARTMENT) \
+ D(PAGE_HIDE) \
+ D(NSJSCONTEXT_DESTROY) \
+ D(SET_NEW_DOCUMENT) \
+ D(SET_DOC_SHELL) \
+ D(DOM_UTILS) \
+ D(DOM_IPC) \
+ D(DOM_WORKER) \
+ D(INTER_SLICE_GC) \
+ D(REFRESH_FRAME) \
+ D(FULL_GC_TIMER) \
+ D(SHUTDOWN_CC) \
+ D(FINISH_LARGE_EVALUTE)
+
+namespace gcreason {
+
+/* GCReasons will end up looking like JSGC_MAYBEGC */
+enum Reason {
+#define MAKE_REASON(name) name,
+ GCREASONS(MAKE_REASON)
+#undef MAKE_REASON
+ NO_REASON,
+ NUM_REASONS,
+
+ /*
+ * For telemetry, we want to keep a fixed max bucket size over time so we
+ * don't have to switch histograms. 100 is conservative; as of this writing
+ * there are 26. But the cost of extra buckets seems to be low while the
+ * cost of switching histograms is high.
+ */
+ NUM_TELEMETRY_REASONS = 100
+};
+
+} /* namespace gcreason */
+
+extern JS_FRIEND_API(void)
+PrepareZoneForGC(Zone *zone);
+
+extern JS_FRIEND_API(void)
+PrepareForFullGC(JSRuntime *rt);
+
+extern JS_FRIEND_API(void)
+PrepareForIncrementalGC(JSRuntime *rt);
+
+extern JS_FRIEND_API(bool)
+IsGCScheduled(JSRuntime *rt);
+
+extern JS_FRIEND_API(void)
+SkipZoneForGC(Zone *zone);
+
+/*
+ * When triggering a GC using one of the functions below, it is first necessary
+ * to select the zones to be collected. To do this, you can call
+ * PrepareZoneForGC on each zone, or you can call PrepareForFullGC to select
+ * all zones. Failing to select any zone is an error.
+ */
+
+extern JS_FRIEND_API(void)
+GCForReason(JSRuntime *rt, gcreason::Reason reason);
+
+extern JS_FRIEND_API(void)
+ShrinkingGC(JSRuntime *rt, gcreason::Reason reason);
+
+extern JS_FRIEND_API(void)
+ShrinkGCBuffers(JSRuntime *rt);
+
+extern JS_FRIEND_API(void)
+IncrementalGC(JSRuntime *rt, gcreason::Reason reason, int64_t millis = 0);
+
+extern JS_FRIEND_API(void)
+FinishIncrementalGC(JSRuntime *rt, gcreason::Reason reason);
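+
+/*
+ * Example (editorial sketch): a full, non-incremental GC initiated by the
+ * embedding in response to memory pressure. Selection must happen first,
+ * as described above.
+ *
+ *     JS::PrepareForFullGC(rt);
+ *     JS::GCForReason(rt, JS::gcreason::MEM_PRESSURE);
+ */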
+
+enum GCProgress {
+    /*
+     * During non-incremental GC, the GC is bracketed by GC_CYCLE_BEGIN/END
+     * callbacks. During an incremental GC, the sequence of callbacks is as
+     * follows:
+     *   GC_CYCLE_BEGIN, GC_SLICE_END  (first slice)
+     *   GC_SLICE_BEGIN, GC_SLICE_END  (second slice)
+     *   ...
+     *   GC_SLICE_BEGIN, GC_CYCLE_END  (last slice)
+     */
+
+ GC_CYCLE_BEGIN,
+ GC_SLICE_BEGIN,
+ GC_SLICE_END,
+ GC_CYCLE_END
+};
+
+struct JS_FRIEND_API(GCDescription) {
+ bool isCompartment_;
+
+ GCDescription(bool isCompartment)
+ : isCompartment_(isCompartment) {}
+
+ jschar *formatMessage(JSRuntime *rt) const;
+ jschar *formatJSON(JSRuntime *rt, uint64_t timestamp) const;
+};
+
+typedef void
+(* GCSliceCallback)(JSRuntime *rt, GCProgress progress, const GCDescription &desc);
+
+extern JS_FRIEND_API(GCSliceCallback)
+SetGCSliceCallback(JSRuntime *rt, GCSliceCallback callback);
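+
+/*
+ * Example (editorial sketch): a slice callback that reports cycle
+ * boundaries; Log() stands in for the embedding's own logging.
+ *
+ *     static void
+ *     OnGCSlice(JSRuntime *rt, JS::GCProgress progress,
+ *               const JS::GCDescription &desc)
+ *     {
+ *         if (progress == JS::GC_CYCLE_BEGIN)
+ *             Log(desc.isCompartment_ ? "compartment GC begins" : "full GC begins");
+ *         else if (progress == JS::GC_CYCLE_END)
+ *             Log("GC cycle done");
+ *     }
+ *
+ *     // SetGCSliceCallback returns the previously installed callback, if any.
+ *     JS::GCSliceCallback prev = JS::SetGCSliceCallback(rt, OnGCSlice);
+ */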
+
+/*
+ * Signals a good place to do an incremental slice, because the browser is
+ * drawing a frame.
+ */
+extern JS_FRIEND_API(void)
+NotifyDidPaint(JSRuntime *rt);
+
+extern JS_FRIEND_API(bool)
+IsIncrementalGCEnabled(JSRuntime *rt);
+
+JS_FRIEND_API(bool)
+IsIncrementalGCInProgress(JSRuntime *rt);
+
+extern JS_FRIEND_API(void)
+DisableIncrementalGC(JSRuntime *rt);
+
+extern JS_FRIEND_API(void)
+DisableGenerationalGC(JSRuntime *rt);
+
+extern JS_FRIEND_API(bool)
+IsIncrementalBarrierNeeded(JSRuntime *rt);
+
+extern JS_FRIEND_API(bool)
+IsIncrementalBarrierNeeded(JSContext *cx);
+
+extern JS_FRIEND_API(void)
+IncrementalReferenceBarrier(void *ptr, JSGCTraceKind kind);
+
+extern JS_FRIEND_API(void)
+IncrementalValueBarrier(const Value &v);
+
+extern JS_FRIEND_API(void)
+IncrementalObjectBarrier(JSObject *obj);
+
+extern JS_FRIEND_API(void)
+PokeGC(JSRuntime *rt);
+
+/* Was the most recent GC run incrementally? */
+extern JS_FRIEND_API(bool)
+WasIncrementalGC(JSRuntime *rt);
+
+class ObjectPtr
+{
+ JSObject *value;
+
+ public:
+ ObjectPtr() : value(NULL) {}
+
+ ObjectPtr(JSObject *obj) : value(obj) {}
+
+ /* Always call finalize before the destructor. */
+ ~ObjectPtr() { JS_ASSERT(!value); }
+
+ void finalize(JSRuntime *rt) {
+ if (IsIncrementalBarrierNeeded(rt))
+ IncrementalObjectBarrier(value);
+ value = NULL;
+ }
+
+ void init(JSObject *obj) { value = obj; }
+
+ JSObject *get() const { return value; }
+
+ void writeBarrierPre(JSRuntime *rt) {
+ IncrementalObjectBarrier(value);
+ }
+
+ bool isAboutToBeFinalized() {
+ return JS_IsAboutToBeFinalized(&value);
+ }
+
+ ObjectPtr &operator=(JSObject *obj) {
+ IncrementalObjectBarrier(value);
+ value = obj;
+ return *this;
+ }
+
+ void trace(JSTracer *trc, const char *name) {
+ JS_CallObjectTracer(trc, &value, name);
+ }
+
+ JSObject &operator*() const { return *value; }
+ JSObject *operator->() const { return value; }
+ operator JSObject *() const { return value; }
+};
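+
+/*
+ * Example (editorial sketch): typical ObjectPtr lifecycle inside an
+ * embedder-owned structure; finalize() must run before destruction (see
+ * the assertion in ~ObjectPtr).
+ *
+ *     struct Cache {
+ *         JS::ObjectPtr obj;
+ *         void set(JSObject *o) { obj = o; }   // operator= runs the pre-barrier
+ *         void destroy(JSRuntime *rt) { obj.finalize(rt); }
+ *     };
+ */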
+
+/*
+ * Unsets the gray bit for anything reachable from |thing|. |kind| should not be
+ * JSTRACE_SHAPE. |thing| should be non-null.
+ */
+extern JS_FRIEND_API(void)
+UnmarkGrayGCThingRecursively(void *thing, JSGCTraceKind kind);
+
+/*
+ * This should be called when an object that is marked gray is exposed to the JS
+ * engine (by handing it to running JS code or writing it into live JS
+ * data). During incremental GC, since the gray bits haven't been computed yet,
+ * we conservatively mark the object black.
+ */
+static JS_ALWAYS_INLINE void
+ExposeGCThingToActiveJS(void *thing, JSGCTraceKind kind)
+{
+ JS_ASSERT(kind != JSTRACE_SHAPE);
+
+ shadow::Runtime *rt = js::gc::GetGCThingRuntime(thing);
+#ifdef JSGC_GENERATIONAL
+ /*
+ * GC things residing in the nursery cannot be gray: they have no mark bits.
+ * All live objects in the nursery are moved to tenured at the beginning of
+ * each GC slice, so the gray marker never sees nursery things.
+ */
+ if (uintptr_t(thing) >= rt->gcNurseryStart_ && uintptr_t(thing) < rt->gcNurseryEnd_)
+ return;
+#endif
+ if (IsIncrementalBarrierNeededOnGCThing(rt, thing, kind))
+ IncrementalReferenceBarrier(thing, kind);
+ else if (GCThingIsMarkedGray(thing))
+ UnmarkGrayGCThingRecursively(thing, kind);
+}
+
+static JS_ALWAYS_INLINE void
+ExposeValueToActiveJS(const Value &v)
+{
+ if (v.isMarkable())
+ ExposeGCThingToActiveJS(v.toGCThing(), v.gcKind());
+}
+
+/*
+ * If a GC is currently marking, mark the object black.
+ */
+static JS_ALWAYS_INLINE void
+MarkGCThingAsLive(JSRuntime *rt_, void *thing, JSGCTraceKind kind)
+{
+ shadow::Runtime *rt = reinterpret_cast<JS::shadow::Runtime*>(rt_);
+
+#ifdef JSGC_GENERATIONAL
+    /*
+     * An object in the nursery cannot be freed by a GC that is already
+     * running, so there is no need to mark it here.
+     */
+ if (js::gc::IsInsideNursery(rt, thing))
+ return;
+#endif
+ if (IsIncrementalBarrierNeededOnGCThing(rt, thing, kind))
+ IncrementalReferenceBarrier(thing, kind);
+}
+
+static JS_ALWAYS_INLINE void
+MarkStringAsLive(JSContext *cx, JSString *string)
+{
+ MarkGCThingAsLive(js::GetRuntime(cx), string, JSTRACE_STRING);
+}
+
+} /* namespace JS */
+
+#endif /* js_GCAPI_h */
diff --git a/js/public/HashTable.h b/js/public/HashTable.h
new file mode 100644
index 0000000..4d1de4a
--- /dev/null
+++ b/js/public/HashTable.h
@@ -0,0 +1,1461 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_HashTable_h
+#define js_HashTable_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/TypeTraits.h"
+#include "mozilla/Util.h"
+
+#include "js/TemplateLib.h"
+#include "js/Utility.h"
+
+namespace js {
+
+class TempAllocPolicy;
+template <class> struct DefaultHasher;
+template <class, class> class HashMapEntry;
+namespace detail {
+ template <class T> class HashTableEntry;
+ template <class T, class HashPolicy, class AllocPolicy> class HashTable;
+}
+
+/*****************************************************************************/
+
+// A JS-friendly, STL-like container providing a hash-based map from keys to
+// values. In particular, HashMap calls constructors and destructors of all
+// objects added so non-PODs may be used safely.
+//
+// Key/Value requirements:
+// - movable, destructible, assignable
+// HashPolicy requirements:
+// - see Hash Policy section below
+// AllocPolicy:
+// - see jsalloc.h
+//
+// Note:
+// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
+// called by HashMap must not call back into the same HashMap object.
+// - Due to the lack of exception handling, the user must call |init()|.
+template <class Key,
+ class Value,
+ class HashPolicy = DefaultHasher<Key>,
+ class AllocPolicy = TempAllocPolicy>
+class HashMap
+{
+ typedef HashMapEntry<Key, Value> TableEntry;
+
+ struct MapHashPolicy : HashPolicy
+ {
+ typedef Key KeyType;
+ static const Key &getKey(TableEntry &e) { return e.key; }
+ static void setKey(TableEntry &e, Key &k) { const_cast<Key &>(e.key) = k; }
+ };
+
+ typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
+ Impl impl;
+
+ public:
+ typedef typename HashPolicy::Lookup Lookup;
+ typedef TableEntry Entry;
+
+ // HashMap construction is fallible (due to OOM); thus the user must call
+ // init after constructing a HashMap and check the return value.
+ HashMap(AllocPolicy a = AllocPolicy())
+ : impl(a)
+ {
+ MOZ_STATIC_ASSERT(tl::IsRelocatableHeapType<Key>::result,
+ "Key type must be relocatable");
+ MOZ_STATIC_ASSERT(tl::IsRelocatableHeapType<Value>::result,
+ "Value type must be relocatable");
+ }
+
+ bool init(uint32_t len = 16) { return impl.init(len); }
+ bool initialized() const { return impl.initialized(); }
+
+ // Return whether the given lookup value is present in the map. E.g.:
+ //
+ // typedef HashMap<int,char> HM;
+ // HM h;
+ // if (HM::Ptr p = h.lookup(3)) {
+ // const HM::Entry &e = *p; // p acts like a pointer to Entry
+ // assert(p->key == 3); // Entry contains the key
+ // char val = p->value; // and value
+ // }
+ //
+ // Also see the definition of Ptr in HashTable above (with T = Entry).
+ typedef typename Impl::Ptr Ptr;
+ Ptr lookup(const Lookup &l) const { return impl.lookup(l); }
+
+ // Like lookup, but does not assert if two threads call lookup at the same
+ // time. Only use this method when none of the threads will modify the map.
+ Ptr readonlyThreadsafeLookup(const Lookup &l) const { return impl.readonlyThreadsafeLookup(l); }
+
+ // Assuming |p.found()|, remove |*p|.
+ void remove(Ptr p) { impl.remove(p); }
+
+ // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+ // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
+ // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
+ //
+ // typedef HashMap<int,char> HM;
+ // HM h;
+ // HM::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // if (!h.add(p, 3, 'a'))
+ // return false;
+ // }
+ // const HM::Entry &e = *p; // p acts like a pointer to Entry
+ // assert(p->key == 3); // Entry contains the key
+ // char val = p->value; // and value
+ //
+ // Also see the definition of AddPtr in HashTable above (with T = Entry).
+ //
+ // N.B. The caller must ensure that no mutating hash table operations
+ // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+ // looking up the key a second time, the caller may use the more efficient
+ // relookupOrAdd method. This method reuses part of the hashing computation
+ // to more efficiently insert the key if it has not been added. For
+ // example, a mutation-handling version of the previous example:
+ //
+ // HM::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // call_that_may_mutate_h();
+ // if (!h.relookupOrAdd(p, 3, 'a'))
+ // return false;
+ // }
+ // const HM::Entry &e = *p;
+ // assert(p->key == 3);
+ // char val = p->value;
+ typedef typename Impl::AddPtr AddPtr;
+ AddPtr lookupForAdd(const Lookup &l) const {
+ return impl.lookupForAdd(l);
+ }
+
+ template<typename KeyInput, typename ValueInput>
+ bool add(AddPtr &p, const KeyInput &k, const ValueInput &v) {
+ Entry e(k, v);
+ return impl.add(p, Move(e));
+ }
+
+ bool add(AddPtr &p, const Key &k) {
+ Entry e(k, Value());
+ return impl.add(p, Move(e));
+ }
+
+ template<typename KeyInput, typename ValueInput>
+ bool relookupOrAdd(AddPtr &p, const KeyInput &k, const ValueInput &v) {
+ Entry e(k, v);
+ return impl.relookupOrAdd(p, k, Move(e));
+ }
+
+ // |all()| returns a Range containing |count()| elements. E.g.:
+ //
+ // typedef HashMap<int,char> HM;
+ // HM h;
+ // for (HM::Range r = h.all(); !r.empty(); r.popFront())
+ // char c = r.front().value;
+ //
+ // Also see the definition of Range in HashTable above (with T = Entry).
+ typedef typename Impl::Range Range;
+ Range all() const { return impl.all(); }
+
+ // Typedef for the enumeration class. An Enum may be used to examine and
+ // remove table entries:
+ //
+ // typedef HashMap<int,char> HM;
+ // HM s;
+ // for (HM::Enum e(s); !e.empty(); e.popFront())
+ // if (e.front().value == 'l')
+ // e.removeFront();
+ //
+ // Table resize may occur in Enum's destructor. Also see the definition of
+ // Enum in HashTable above (with T = Entry).
+ typedef typename Impl::Enum Enum;
+
+ // Remove all entries. This does not shrink the table. For that consider
+ // using the finish() method.
+ void clear() { impl.clear(); }
+
+ // Remove all entries without triggering destructors. This method is unsafe.
+ void clearWithoutCallingDestructors() { impl.clearWithoutCallingDestructors(); }
+
+ // Remove all the entries and release all internal buffers. The map must
+ // be initialized again before any use.
+ void finish() { impl.finish(); }
+
+ // Does the table contain any entries?
+ bool empty() const { return impl.empty(); }
+
+ // Number of live elements in the map.
+ uint32_t count() const { return impl.count(); }
+
+    // Total number of allocated entries in the dynamic table. Note: resize will
+ // happen well before count() == capacity().
+ size_t capacity() const { return impl.capacity(); }
+
+ // Don't just call |impl.sizeOfExcludingThis()| because there's no
+ // guarantee that |impl| is the first field in HashMap.
+ size_t sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf) const {
+ return impl.sizeOfExcludingThis(mallocSizeOf);
+ }
+ size_t sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const {
+ return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ // If |generation()| is the same before and after a HashMap operation,
+ // pointers into the table remain valid.
+ unsigned generation() const { return impl.generation(); }
+
+ /************************************************** Shorthand operations */
+
+ bool has(const Lookup &l) const {
+ return impl.lookup(l) != NULL;
+ }
+
+ // Overwrite existing value with v. Return false on oom.
+ template<typename KeyInput, typename ValueInput>
+ bool put(const KeyInput &k, const ValueInput &v) {
+ AddPtr p = lookupForAdd(k);
+ if (p) {
+ p->value = v;
+ return true;
+ }
+ return add(p, k, v);
+ }
+
+ // Like put, but assert that the given key is not already present.
+ template<typename KeyInput, typename ValueInput>
+ bool putNew(const KeyInput &k, const ValueInput &v) {
+ Entry e(k, v);
+ return impl.putNew(k, Move(e));
+ }
+
+ // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
+ Ptr lookupWithDefault(const Key &k, const Value &defaultValue) {
+ AddPtr p = lookupForAdd(k);
+ if (p)
+ return p;
+ (void)add(p, k, defaultValue); // p is left false-y on oom.
+ return p;
+ }
+
+ // Remove if present.
+ void remove(const Lookup &l) {
+ if (Ptr p = lookup(l))
+ remove(p);
+ }
+
+ // HashMap is movable
+ HashMap(MoveRef<HashMap> rhs) : impl(Move(rhs->impl)) {}
+ void operator=(MoveRef<HashMap> rhs) { impl = Move(rhs->impl); }
+
+ private:
+ // HashMap is not copyable or assignable
+ HashMap(const HashMap &hm) MOZ_DELETE;
+ HashMap &operator=(const HashMap &hm) MOZ_DELETE;
+
+ friend class Impl::Enum;
+};
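+
+// Example (editorial sketch): a complete HashMap use, including the fallible
+// init() call that the snippets above assume has already succeeded.
+//
+//   js::HashMap<int, char> h;
+//   if (!h.init())
+//       return false;                       // OOM
+//   if (!h.put(3, 'a'))
+//       return false;                       // OOM
+//   if (js::HashMap<int, char>::Ptr p = h.lookup(3))
+//       char c = p->value;
+//   h.remove(3);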
+
+/*****************************************************************************/
+
+// A JS-friendly, STL-like container providing a hash-based set of values. In
+// particular, HashSet calls constructors and destructors of all objects added
+// so non-PODs may be used safely.
+//
+// T requirements:
+// - movable, destructible, assignable
+// HashPolicy requirements:
+// - see Hash Policy section below
+// AllocPolicy:
+// - see jsalloc.h
+//
+// Note:
+// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
+// HashSet must not call back into the same HashSet object.
+// - Due to the lack of exception handling, the user must call |init()|.
+template <class T,
+ class HashPolicy = DefaultHasher<T>,
+ class AllocPolicy = TempAllocPolicy>
+class HashSet
+{
+ struct SetOps : HashPolicy
+ {
+ typedef T KeyType;
+ static const KeyType &getKey(const T &t) { return t; }
+ static void setKey(T &t, KeyType &k) { t = k; }
+ };
+
+ typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
+ Impl impl;
+
+ public:
+ typedef typename HashPolicy::Lookup Lookup;
+ typedef T Entry;
+
+ // HashSet construction is fallible (due to OOM); thus the user must call
+ // init after constructing a HashSet and check the return value.
+ HashSet(AllocPolicy a = AllocPolicy()) : impl(a)
+ {
+ MOZ_STATIC_ASSERT(tl::IsRelocatableHeapType<T>::result,
+ "Set element type must be relocatable");
+ }
+ bool init(uint32_t len = 16) { return impl.init(len); }
+ bool initialized() const { return impl.initialized(); }
+
+    // Return whether the given lookup value is present in the set. E.g.:
+ //
+ // typedef HashSet<int> HS;
+ // HS h;
+ // if (HS::Ptr p = h.lookup(3)) {
+ // assert(*p == 3); // p acts like a pointer to int
+ // }
+ //
+ // Also see the definition of Ptr in HashTable above.
+ typedef typename Impl::Ptr Ptr;
+ Ptr lookup(const Lookup &l) const { return impl.lookup(l); }
+
+ // Assuming |p.found()|, remove |*p|.
+ void remove(Ptr p) { impl.remove(p); }
+
+ // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+ // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
+ // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
+ //
+ // typedef HashSet<int> HS;
+ // HS h;
+ // HS::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // if (!h.add(p, 3))
+ // return false;
+ // }
+ // assert(*p == 3); // p acts like a pointer to int
+ //
+ // Also see the definition of AddPtr in HashTable above.
+ //
+ // N.B. The caller must ensure that no mutating hash table operations
+ // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+ // looking up the key a second time, the caller may use the more efficient
+ // relookupOrAdd method. This method reuses part of the hashing computation
+ // to more efficiently insert the key if it has not been added. For
+ // example, a mutation-handling version of the previous example:
+ //
+ // HS::AddPtr p = h.lookupForAdd(3);
+ // if (!p) {
+ // call_that_may_mutate_h();
+ // if (!h.relookupOrAdd(p, 3, 3))
+ // return false;
+ // }
+ // assert(*p == 3);
+ //
+ // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
+ // entry |t|, where the caller ensures match(l,t).
+ typedef typename Impl::AddPtr AddPtr;
+ AddPtr lookupForAdd(const Lookup &l) const { return impl.lookupForAdd(l); }
+
+ bool add(AddPtr &p, const T &t) { return impl.add(p, t); }
+
+ bool relookupOrAdd(AddPtr &p, const Lookup &l, const T &t) {
+ return impl.relookupOrAdd(p, l, t);
+ }
+
+ // |all()| returns a Range containing |count()| elements:
+ //
+ // typedef HashSet<int> HS;
+ // HS h;
+ // for (HS::Range r = h.all(); !r.empty(); r.popFront())
+ // int i = r.front();
+ //
+ // Also see the definition of Range in HashTable above.
+ typedef typename Impl::Range Range;
+ Range all() const { return impl.all(); }
+
+ // Typedef for the enumeration class. An Enum may be used to examine and
+ // remove table entries:
+ //
+ // typedef HashSet<int> HS;
+ // HS s;
+ // for (HS::Enum e(s); !e.empty(); e.popFront())
+ // if (e.front() == 42)
+ // e.removeFront();
+ //
+ // Table resize may occur in Enum's destructor. Also see the definition of
+ // Enum in HashTable above.
+ typedef typename Impl::Enum Enum;
+
+ // Remove all entries. This does not shrink the table. For that consider
+ // using the finish() method.
+ void clear() { impl.clear(); }
+
+ // Remove all the entries and release all internal buffers. The set must
+ // be initialized again before any use.
+ void finish() { impl.finish(); }
+
+ // Does the table contain any entries?
+ bool empty() const { return impl.empty(); }
+
+    // Number of live elements in the set.
+ uint32_t count() const { return impl.count(); }
+
+    // Total number of allocated entries in the dynamic table. Note: resize will
+ // happen well before count() == capacity().
+ size_t capacity() const { return impl.capacity(); }
+
+ // Don't just call |impl.sizeOfExcludingThis()| because there's no
+ // guarantee that |impl| is the first field in HashSet.
+ size_t sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf) const {
+ return impl.sizeOfExcludingThis(mallocSizeOf);
+ }
+ size_t sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const {
+ return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ // If |generation()| is the same before and after a HashSet operation,
+ // pointers into the table remain valid.
+ unsigned generation() const { return impl.generation(); }
+
+ /************************************************** Shorthand operations */
+
+ bool has(const Lookup &l) const {
+ return impl.lookup(l) != NULL;
+ }
+
+    // Add |t| if it is not already present. Return false on oom.
+ bool put(const T &t) {
+ AddPtr p = lookupForAdd(t);
+ return p ? true : add(p, t);
+ }
+
+ // Like put, but assert that the given key is not already present.
+ bool putNew(const T &t) {
+ return impl.putNew(t, t);
+ }
+
+ bool putNew(const Lookup &l, const T &t) {
+ return impl.putNew(l, t);
+ }
+
+ void remove(const Lookup &l) {
+ if (Ptr p = lookup(l))
+ remove(p);
+ }
+
+ // HashSet is movable
+ HashSet(MoveRef<HashSet> rhs) : impl(Move(rhs->impl)) {}
+ void operator=(MoveRef<HashSet> rhs) { impl = Move(rhs->impl); }
+
+ private:
+ // HashSet is not copyable or assignable
+ HashSet(const HashSet &hs) MOZ_DELETE;
+ HashSet &operator=(const HashSet &hs) MOZ_DELETE;
+
+ friend class Impl::Enum;
+};
+
+/*****************************************************************************/
+
+// Hash Policy
+//
+// A hash policy P for a hash table with key-type Key must provide:
+// - a type |P::Lookup| to use to lookup table entries;
+// - a static member function |P::hash| with signature
+//
+// static js::HashNumber hash(Lookup)
+//
+// to use to hash the lookup type; and
+// - a static member function |P::match| with signature
+//
+// static bool match(Key, Lookup)
+//
+// to use to test equality of key and lookup values.
+//
+// Normally, Lookup = Key. In general, though, different values and types of
+// values can be used to look up and store. If a Lookup value |l| is != to the
+// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
+//
+//   js::HashSet<Key, P>::AddPtr p = h.lookupForAdd(l);
+// if (!p) {
+// assert(P::match(k, l)); // must hold
+// h.add(p, k);
+// }
+
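+// Example (editorial sketch): a policy for a hypothetical two-field key; the
+// key type and the mixing constant are illustrative only.
+//
+//   struct PairKey { uint32_t a, b; };
+//   struct PairKeyHasher {
+//       typedef PairKey Lookup;
+//       static js::HashNumber hash(const Lookup &l) {
+//           return l.a ^ (l.b * 7);
+//       }
+//       static bool match(const PairKey &k, const Lookup &l) {
+//           return k.a == l.a && k.b == l.b;
+//       }
+//   };
+//   typedef js::HashSet<PairKey, PairKeyHasher> PairSet;
+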
+// Pointer hashing policy that strips the lowest zeroBits when calculating the
+// hash to improve key distribution.
+template <typename Key, size_t zeroBits>
+struct PointerHasher
+{
+ typedef Key Lookup;
+ static HashNumber hash(const Lookup &l) {
+ JS_ASSERT(!JS::IsPoisonedPtr(l));
+ size_t word = reinterpret_cast<size_t>(l) >> zeroBits;
+ JS_STATIC_ASSERT(sizeof(HashNumber) == 4);
+#if JS_BYTES_PER_WORD == 4
+ return HashNumber(word);
+#else
+ JS_STATIC_ASSERT(sizeof word == 8);
+ return HashNumber((word >> 32) ^ word);
+#endif
+ }
+ static bool match(const Key &k, const Lookup &l) {
+ JS_ASSERT(!JS::IsPoisonedPtr(k));
+ JS_ASSERT(!JS::IsPoisonedPtr(l));
+ return k == l;
+ }
+};
+
+// Default hash policy: just use the 'lookup' value. This of course only
+// works if the lookup value is integral. HashTable applies ScrambleHashCode to
+// the result of the 'hash' which means that it is 'ok' if the lookup value is
+// not well distributed over the HashNumber domain.
+template <class Key>
+struct DefaultHasher
+{
+ typedef Key Lookup;
+ static HashNumber hash(const Lookup &l) {
+        // Hash via implicit conversion to the hash number type.
+ return l;
+ }
+ static bool match(const Key &k, const Lookup &l) {
+ // Use builtin or overloaded operator==.
+ return k == l;
+ }
+};
+
+// Specialize hashing policy for pointer types. It assumes that the type is
+// at least word-aligned. For types with weaker alignment, use PointerHasher
+// directly with an appropriate zeroBits.
+template <class T>
+struct DefaultHasher<T *> : PointerHasher<T *, tl::FloorLog2<sizeof(void *)>::result>
+{};
+
+// For doubles, we can xor the two uint32s.
+template <>
+struct DefaultHasher<double>
+{
+ typedef double Lookup;
+ static HashNumber hash(double d) {
+ JS_STATIC_ASSERT(sizeof(HashNumber) == 4);
+ uint64_t u = mozilla::BitwiseCast<uint64_t>(d);
+ return HashNumber(u ^ (u >> 32));
+ }
+ static bool match(double lhs, double rhs) {
+ return mozilla::BitwiseCast<uint64_t>(lhs) == mozilla::BitwiseCast<uint64_t>(rhs);
+ }
+};
+
+/*****************************************************************************/
+
+// Both HashMap and HashSet are implemented by a single HashTable that is even
+// more heavily parameterized than the other two. This leaves HashTable gnarly
+// and extremely coupled to HashMap and HashSet; thus code should not use
+// HashTable directly.
+
+template <class Key, class Value>
+class HashMapEntry
+{
+ template <class, class, class> friend class detail::HashTable;
+ template <class> friend class detail::HashTableEntry;
+
+ HashMapEntry(const HashMapEntry &) MOZ_DELETE;
+ void operator=(const HashMapEntry &) MOZ_DELETE;
+
+ public:
+ template<typename KeyInput, typename ValueInput>
+ HashMapEntry(const KeyInput &k, const ValueInput &v) : key(k), value(v) {}
+
+ HashMapEntry(MoveRef<HashMapEntry> rhs)
+ : key(Move(rhs->key)), value(Move(rhs->value)) { }
+
+ typedef Key KeyType;
+ typedef Value ValueType;
+
+ const Key key;
+ Value value;
+};
+
+} // namespace js
+
+namespace mozilla {
+
+template <typename T>
+struct IsPod<js::detail::HashTableEntry<T> > : IsPod<T> {};
+
+template <typename K, typename V>
+struct IsPod<js::HashMapEntry<K, V> >
+ : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
+{};
+
+} // namespace mozilla
+
+namespace js {
+
+namespace detail {
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable;
+
+template <class T>
+class HashTableEntry
+{
+ template <class, class, class> friend class HashTable;
+ typedef typename mozilla::RemoveConst<T>::Type NonConstT;
+
+ HashNumber keyHash;
+ mozilla::AlignedStorage2<NonConstT> mem;
+
+ static const HashNumber sFreeKey = 0;
+ static const HashNumber sRemovedKey = 1;
+ static const HashNumber sCollisionBit = 1;
+
+ // Assumed by calloc in createTable.
+ JS_STATIC_ASSERT(sFreeKey == 0);
+
+ static bool isLiveHash(HashNumber hash)
+ {
+ return hash > sRemovedKey;
+ }
+
+ HashTableEntry(const HashTableEntry &) MOZ_DELETE;
+ void operator=(const HashTableEntry &) MOZ_DELETE;
+ ~HashTableEntry() MOZ_DELETE;
+
+ public:
+ // NB: HashTableEntry is treated as a POD: no constructor or destructor calls.
+
+ void destroyIfLive() {
+ if (isLive())
+ mem.addr()->~T();
+ }
+
+ void destroy() {
+ JS_ASSERT(isLive());
+ mem.addr()->~T();
+ }
+
+ void swap(HashTableEntry *other) {
+ Swap(keyHash, other->keyHash);
+ Swap(mem, other->mem);
+ }
+
+ T &get() { JS_ASSERT(isLive()); return *mem.addr(); }
+
+ bool isFree() const { return keyHash == sFreeKey; }
+ void clearLive() { JS_ASSERT(isLive()); keyHash = sFreeKey; mem.addr()->~T(); }
+ void clear() { if (isLive()) mem.addr()->~T(); keyHash = sFreeKey; }
+ bool isRemoved() const { return keyHash == sRemovedKey; }
+ void removeLive() { JS_ASSERT(isLive()); keyHash = sRemovedKey; mem.addr()->~T(); }
+ bool isLive() const { return isLiveHash(keyHash); }
+ void setCollision() { JS_ASSERT(isLive()); keyHash |= sCollisionBit; }
+ void setCollision(HashNumber bit) { JS_ASSERT(isLive()); keyHash |= bit; }
+ void unsetCollision() { keyHash &= ~sCollisionBit; }
+ bool hasCollision() const { return keyHash & sCollisionBit; }
+ bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
+ HashNumber getKeyHash() const { return keyHash & ~sCollisionBit; }
+
+ template <class U>
+ void setLive(HashNumber hn, const U &u)
+ {
+ JS_ASSERT(!isLive());
+ keyHash = hn;
+ new(mem.addr()) T(u);
+ JS_ASSERT(isLive());
+ }
+};
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable : private AllocPolicy
+{
+ typedef typename mozilla::RemoveConst<T>::Type NonConstT;
+ typedef typename HashPolicy::KeyType Key;
+ typedef typename HashPolicy::Lookup Lookup;
+
+ public:
+ typedef HashTableEntry<T> Entry;
+
+ // A nullable pointer to a hash table element. A Ptr |p| can be tested
+ // either explicitly |if (p.found()) p->...| or using boolean conversion
+ // |if (p) p->...|. Ptr objects must not be used after any mutating hash
+ // table operations unless |generation()| is tested.
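+    //
+    // For example (a sketch; assumes |s| is an initialized HashSet<int>):
+    //
+    //   if (HashSet<int>::Ptr p = s.lookup(42))
+    //     printf("%d is present\n", *p);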
+ class Ptr
+ {
+ friend class HashTable;
+ typedef void (Ptr::* ConvertibleToBool)();
+ void nonNull() {}
+
+ Entry *entry_;
+
+ protected:
+ Ptr(Entry &entry) : entry_(&entry) {}
+
+ public:
+ // Leaves Ptr uninitialized.
+ Ptr() {
+#ifdef JS_DEBUG
+ entry_ = (Entry *)0xbad;
+#endif
+ }
+
+ bool found() const { return entry_->isLive(); }
+ operator ConvertibleToBool() const { return found() ? &Ptr::nonNull : 0; }
+ bool operator==(const Ptr &rhs) const { JS_ASSERT(found() && rhs.found()); return entry_ == rhs.entry_; }
+ bool operator!=(const Ptr &rhs) const { return !(*this == rhs); }
+
+ T &operator*() const { return entry_->get(); }
+ T *operator->() const { return &entry_->get(); }
+ };
+
+ // A Ptr that can be used to add a key after a failed lookup.
+ class AddPtr : public Ptr
+ {
+ friend class HashTable;
+ HashNumber keyHash;
+ mozilla::DebugOnly<uint64_t> mutationCount;
+
+ AddPtr(Entry &entry, HashNumber hn) : Ptr(entry), keyHash(hn) {}
+ public:
+ // Leaves AddPtr uninitialized.
+ AddPtr() {}
+ };
+
+ // A collection of hash table entries. The collection is enumerated by
+ // calling |front()| followed by |popFront()| as long as |!empty()|. As
+ // with Ptr/AddPtr, Range objects must not be used after any mutating hash
+ // table operation unless the |generation()| is tested.
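+    //
+    // For example (a sketch; assumes |s| is an initialized HashSet<int>):
+    //
+    //   for (HashSet<int>::Range r = s.all(); !r.empty(); r.popFront())
+    //     printf("%d\n", r.front());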
+ class Range
+ {
+ protected:
+ friend class HashTable;
+
+ Range(Entry *c, Entry *e) : cur(c), end(e), validEntry(true) {
+ while (cur < end && !cur->isLive())
+ ++cur;
+ }
+
+ Entry *cur, *end;
+ mozilla::DebugOnly<bool> validEntry;
+
+ public:
+ Range() : cur(NULL), end(NULL), validEntry(false) {}
+
+ bool empty() const {
+ return cur == end;
+ }
+
+ T &front() const {
+ JS_ASSERT(validEntry);
+ JS_ASSERT(!empty());
+ return cur->get();
+ }
+
+ void popFront() {
+ JS_ASSERT(!empty());
+ while (++cur < end && !cur->isLive())
+ continue;
+ validEntry = true;
+ }
+ };
+
+ // A Range whose lifetime delimits a mutating enumeration of a hash table.
+ // Since rehashing when elements were removed during enumeration would be
+    // bad, it is postponed until the Enum is destroyed. Since the Enum's
+ // destructor touches the hash table, the user must ensure that the hash
+ // table is still alive when the destructor runs.
+ class Enum : public Range
+ {
+ friend class HashTable;
+
+ HashTable &table;
+ bool rekeyed;
+ bool removed;
+
+ /* Not copyable. */
+ Enum(const Enum &);
+ void operator=(const Enum &);
+
+ public:
+ template<class Map> explicit
+ Enum(Map &map) : Range(map.all()), table(map.impl), rekeyed(false), removed(false) {}
+
+ // Removes the |front()| element from the table, leaving |front()|
+ // invalid until the next call to |popFront()|. For example:
+ //
+ // HashSet<int> s;
+ // for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
+ // if (e.front() == 42)
+ // e.removeFront();
+ void removeFront() {
+ table.remove(*this->cur);
+ removed = true;
+ this->validEntry = false;
+ }
+
+ // Removes the |front()| element and re-inserts it into the table with
+ // a new key at the new Lookup position. |front()| is invalid after
+ // this operation until the next call to |popFront()|.
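+        //
+        // For example (a sketch; |shouldRekey| and |newKeyFor| are
+        // hypothetical helpers, and the map's hash policy must provide
+        // setKey):
+        //
+        //   for (HashMap<K, V>::Enum e(m); !e.empty(); e.popFront())
+        //     if (shouldRekey(e.front().key))
+        //       e.rekeyFront(newKeyFor(e.front().key));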
+ void rekeyFront(const Lookup &l, const Key &k) {
+ typename HashTableEntry<T>::NonConstT t(Move(this->cur->get()));
+ HashPolicy::setKey(t, const_cast<Key &>(k));
+ table.remove(*this->cur);
+ table.putNewInfallible(l, Move(t));
+ rekeyed = true;
+ this->validEntry = false;
+ }
+
+ void rekeyFront(const Key &k) {
+ rekeyFront(k, k);
+ }
+
+ // Potentially rehashes the table.
+ ~Enum() {
+ if (rekeyed) {
+ table.gen++;
+ table.checkOverRemoved();
+ }
+
+ if (removed)
+ table.compactIfUnderloaded();
+ }
+ };
+
+ // HashTable is movable
+ HashTable(MoveRef<HashTable> rhs)
+ : AllocPolicy(*rhs)
+ {
+ mozilla::PodAssign(this, &*rhs);
+ rhs->table = NULL;
+ }
+ void operator=(MoveRef<HashTable> rhs) {
+ if (table)
+ destroyTable(*this, table, capacity());
+ mozilla::PodAssign(this, &*rhs);
+ rhs->table = NULL;
+ }
+
+ private:
+ // HashTable is not copyable or assignable
+ HashTable(const HashTable &) MOZ_DELETE;
+ void operator=(const HashTable &) MOZ_DELETE;
+
+ private:
+ uint32_t hashShift; // multiplicative hash shift
+ uint32_t entryCount; // number of entries in table
+ uint32_t gen; // entry storage generation number
+ uint32_t removedCount; // removed entry sentinels in table
+ Entry *table; // entry storage
+
+ void setTableSizeLog2(unsigned sizeLog2)
+ {
+ hashShift = sHashBits - sizeLog2;
+ }
+
+#ifdef JS_DEBUG
+ mutable struct Stats
+ {
+ uint32_t searches; // total number of table searches
+ uint32_t steps; // hash chain links traversed
+ uint32_t hits; // searches that found key
+ uint32_t misses; // searches that didn't find key
+ uint32_t addOverRemoved; // adds that recycled a removed entry
+ uint32_t removes; // calls to remove
+ uint32_t removeFrees; // calls to remove that freed the entry
+ uint32_t grows; // table expansions
+ uint32_t shrinks; // table contractions
+ uint32_t compresses; // table compressions
+ uint32_t rehashes; // tombstone decontaminations
+ } stats;
+# define METER(x) x
+#else
+# define METER(x)
+#endif
+
+ friend class js::ReentrancyGuard;
+ mutable mozilla::DebugOnly<bool> entered;
+ mozilla::DebugOnly<uint64_t> mutationCount;
+
+ // The default initial capacity is 32 (enough to hold 16 elements), but it
+ // can be as low as 4.
+ static const unsigned sMinCapacityLog2 = 2;
+ static const unsigned sMinCapacity = 1 << sMinCapacityLog2;
+ static const unsigned sMaxInit = JS_BIT(23);
+ static const unsigned sMaxCapacity = JS_BIT(24);
+ static const unsigned sHashBits = tl::BitSize<HashNumber>::result;
+ static const uint8_t sMinAlphaFrac = 64; // (0x100 * .25)
+ static const uint8_t sMaxAlphaFrac = 192; // (0x100 * .75)
+ static const uint8_t sInvMaxAlpha = 171; // (ceil(0x100 / .75) >> 1)
+ static const HashNumber sFreeKey = Entry::sFreeKey;
+ static const HashNumber sRemovedKey = Entry::sRemovedKey;
+ static const HashNumber sCollisionBit = Entry::sCollisionBit;
+
+ static void staticAsserts()
+ {
+ // Rely on compiler "constant overflow warnings".
+ JS_STATIC_ASSERT(((sMaxInit * sInvMaxAlpha) >> 7) < sMaxCapacity);
+ JS_STATIC_ASSERT((sMaxCapacity * sInvMaxAlpha) <= UINT32_MAX);
+ JS_STATIC_ASSERT((sMaxCapacity * sizeof(Entry)) <= UINT32_MAX);
+ }
+
+ static bool isLiveHash(HashNumber hash)
+ {
+ return Entry::isLiveHash(hash);
+ }
+
+ static HashNumber prepareHash(const Lookup& l)
+ {
+ HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
+
+ // Avoid reserved hash codes.
+ if (!isLiveHash(keyHash))
+ keyHash -= (sRemovedKey + 1);
+ return keyHash & ~sCollisionBit;
+ }
+
+ static Entry *createTable(AllocPolicy &alloc, uint32_t capacity)
+ {
+ // See JS_STATIC_ASSERT(sFreeKey == 0) in HashTableEntry.
+ return (Entry *)alloc.calloc_(capacity * sizeof(Entry));
+ }
+
+ static void destroyTable(AllocPolicy &alloc, Entry *oldTable, uint32_t capacity)
+ {
+ for (Entry *e = oldTable, *end = e + capacity; e < end; ++e)
+ e->destroyIfLive();
+ alloc.free_(oldTable);
+ }
+
+ public:
+ HashTable(AllocPolicy ap)
+ : AllocPolicy(ap),
+ hashShift(sHashBits),
+ entryCount(0),
+ gen(0),
+ removedCount(0),
+ table(NULL),
+ entered(false),
+ mutationCount(0)
+ {}
+
+ MOZ_WARN_UNUSED_RESULT bool init(uint32_t length)
+ {
+ JS_ASSERT(!initialized());
+
+ // Correct for sMaxAlphaFrac such that the table will not resize
+ // when adding 'length' entries.
+ if (length > sMaxInit) {
+ this->reportAllocOverflow();
+ return false;
+ }
+ uint32_t newCapacity = (length * sInvMaxAlpha) >> 7;
+
+ if (newCapacity < sMinCapacity)
+ newCapacity = sMinCapacity;
+
+ // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
+ uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
+ while (roundUp < newCapacity) {
+ roundUp <<= 1;
+ ++roundUpLog2;
+ }
+
+ newCapacity = roundUp;
+ JS_ASSERT(newCapacity <= sMaxCapacity);
+
+ table = createTable(*this, newCapacity);
+ if (!table)
+ return false;
+
+ setTableSizeLog2(roundUpLog2);
+ METER(memset(&stats, 0, sizeof(stats)));
+ return true;
+ }
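+
+    // For example, init(16) computes newCapacity = (16 * 171) >> 7 = 21 and
+    // rounds it up to the power of two 32, matching the note above that the
+    // default capacity of 32 holds 16 elements without resizing.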
+
+ bool initialized() const
+ {
+ return !!table;
+ }
+
+ ~HashTable()
+ {
+ if (table)
+ destroyTable(*this, table, capacity());
+ }
+
+ private:
+ HashNumber hash1(HashNumber hash0) const
+ {
+ return hash0 >> hashShift;
+ }
+
+ struct DoubleHash
+ {
+ HashNumber h2;
+ HashNumber sizeMask;
+ };
+
+ DoubleHash hash2(HashNumber curKeyHash) const
+ {
+ unsigned sizeLog2 = sHashBits - hashShift;
+ DoubleHash dh = {
+ ((curKeyHash << sizeLog2) >> hashShift) | 1,
+ (HashNumber(1) << sizeLog2) - 1
+ };
+ return dh;
+ }
+
+ static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash &dh)
+ {
+ return (h1 - dh.h2) & dh.sizeMask;
+ }
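+
+    // For concreteness: |dh.h2| is forced odd, hence coprime with the
+    // power-of-two capacity, so repeated applications of applyDoubleHash
+    // visit every slot. With capacity 8, h1 = 5 and dh.h2 = 3, the probe
+    // sequence is 5, 2, 7, 4, 1, 6, 3, 0.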
+
+ bool overloaded()
+ {
+ return entryCount + removedCount >= ((sMaxAlphaFrac * capacity()) >> 8);
+ }
+
+ // Would the table be underloaded if it had the given capacity and entryCount?
+ static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
+ {
+ return capacity > sMinCapacity && entryCount <= ((sMinAlphaFrac * capacity) >> 8);
+ }
+
+ bool underloaded()
+ {
+ return wouldBeUnderloaded(capacity(), entryCount);
+ }
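+
+    // For example, at capacity 32 the table is overloaded once
+    // entryCount + removedCount reaches 24 ((192 * 32) >> 8, i.e. 75% full)
+    // and underloaded once entryCount is at most 8 ((64 * 32) >> 8, i.e.
+    // 25% full).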
+
+ static bool match(Entry &e, const Lookup &l)
+ {
+ return HashPolicy::match(HashPolicy::getKey(e.get()), l);
+ }
+
+ Entry &lookup(const Lookup &l, HashNumber keyHash, unsigned collisionBit) const
+ {
+ JS_ASSERT(isLiveHash(keyHash));
+ JS_ASSERT(!(keyHash & sCollisionBit));
+ JS_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
+ JS_ASSERT(table);
+ METER(stats.searches++);
+
+ // Compute the primary hash address.
+ HashNumber h1 = hash1(keyHash);
+ Entry *entry = &table[h1];
+
+ // Miss: return space for a new entry.
+ if (entry->isFree()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+
+ // Hit: return entry.
+ if (entry->matchHash(keyHash) && match(*entry, l)) {
+ METER(stats.hits++);
+ return *entry;
+ }
+
+ // Collision: double hash.
+ DoubleHash dh = hash2(keyHash);
+
+ // Save the first removed entry pointer so we can recycle later.
+ Entry *firstRemoved = NULL;
+
+        while (true) {
+ if (JS_UNLIKELY(entry->isRemoved())) {
+ if (!firstRemoved)
+ firstRemoved = entry;
+ } else {
+ entry->setCollision(collisionBit);
+ }
+
+ METER(stats.steps++);
+ h1 = applyDoubleHash(h1, dh);
+
+ entry = &table[h1];
+ if (entry->isFree()) {
+ METER(stats.misses++);
+ return firstRemoved ? *firstRemoved : *entry;
+ }
+
+ if (entry->matchHash(keyHash) && match(*entry, l)) {
+ METER(stats.hits++);
+ return *entry;
+ }
+ }
+ }
+
+    // This is a copy of lookup hardcoded to the assumptions:
+    //   1. the lookup is a lookupForAdd,
+    //   2. the key, whose |keyHash| has been passed, is not in the table, and
+    //   3. no entries have been removed from the table.
+ // This specialized search avoids the need for recovering lookup values
+ // from entries, which allows more flexible Lookup/Key types.
+ Entry &findFreeEntry(HashNumber keyHash)
+ {
+ JS_ASSERT(!(keyHash & sCollisionBit));
+ JS_ASSERT(table);
+ METER(stats.searches++);
+
+ // We assume 'keyHash' has already been distributed.
+
+ // Compute the primary hash address.
+ HashNumber h1 = hash1(keyHash);
+ Entry *entry = &table[h1];
+
+ // Miss: return space for a new entry.
+ if (!entry->isLive()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+
+ // Collision: double hash.
+ DoubleHash dh = hash2(keyHash);
+
+        while (true) {
+ JS_ASSERT(!entry->isRemoved());
+ entry->setCollision();
+
+ METER(stats.steps++);
+ h1 = applyDoubleHash(h1, dh);
+
+ entry = &table[h1];
+ if (!entry->isLive()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+ }
+ }
+
+ enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+
+ RebuildStatus changeTableSize(int deltaLog2)
+ {
+ // Look, but don't touch, until we succeed in getting new entry store.
+ Entry *oldTable = table;
+ uint32_t oldCap = capacity();
+ uint32_t newLog2 = sHashBits - hashShift + deltaLog2;
+ uint32_t newCapacity = JS_BIT(newLog2);
+ if (newCapacity > sMaxCapacity) {
+ this->reportAllocOverflow();
+ return RehashFailed;
+ }
+
+ Entry *newTable = createTable(*this, newCapacity);
+ if (!newTable)
+ return RehashFailed;
+
+ // We can't fail from here on, so update table parameters.
+ setTableSizeLog2(newLog2);
+ removedCount = 0;
+ gen++;
+ table = newTable;
+
+ // Copy only live entries, leaving removed ones behind.
+ for (Entry *src = oldTable, *end = src + oldCap; src < end; ++src) {
+ if (src->isLive()) {
+ HashNumber hn = src->getKeyHash();
+ findFreeEntry(hn).setLive(hn, Move(src->get()));
+ src->destroy();
+ }
+ }
+
+ // All entries have been destroyed, no need to destroyTable.
+ this->free_(oldTable);
+ return Rehashed;
+ }
+
+ RebuildStatus checkOverloaded()
+ {
+ if (!overloaded())
+ return NotOverloaded;
+
+ // Compress if a quarter or more of all entries are removed.
+ int deltaLog2;
+ if (removedCount >= (capacity() >> 2)) {
+ METER(stats.compresses++);
+ deltaLog2 = 0;
+ } else {
+ METER(stats.grows++);
+ deltaLog2 = 1;
+ }
+
+ return changeTableSize(deltaLog2);
+ }
+
+ // Infallibly rehash the table if we are overloaded with removals.
+ void checkOverRemoved()
+ {
+ if (overloaded()) {
+ if (checkOverloaded() == RehashFailed)
+ rehashTableInPlace();
+ }
+ }
+
+ void remove(Entry &e)
+ {
+ JS_ASSERT(table);
+ METER(stats.removes++);
+
+ if (e.hasCollision()) {
+ e.removeLive();
+ removedCount++;
+ } else {
+ METER(stats.removeFrees++);
+ e.clearLive();
+ }
+ entryCount--;
+ mutationCount++;
+ }
+
+ void checkUnderloaded()
+ {
+ if (underloaded()) {
+ METER(stats.shrinks++);
+ (void) changeTableSize(-1);
+ }
+ }
+
+ // Resize the table down to the largest capacity which doesn't underload the
+ // table. Since we call checkUnderloaded() on every remove, you only need
+ // to call this after a bulk removal of items done without calling remove().
+ void compactIfUnderloaded()
+ {
+ int32_t resizeLog2 = 0;
+ uint32_t newCapacity = capacity();
+ while (wouldBeUnderloaded(newCapacity, entryCount)) {
+ newCapacity = newCapacity >> 1;
+ resizeLog2--;
+ }
+
+ if (resizeLog2 != 0) {
+ changeTableSize(resizeLog2);
+ }
+ }
+
+ // This is identical to changeTableSize(currentSize), but without requiring
+ // a second table. We do this by recycling the collision bits to tell us if
+ // the element is already inserted or still waiting to be inserted. Since
+ // already-inserted elements win any conflicts, we get the same table as we
+ // would have gotten through random insertion order.
+ void rehashTableInPlace()
+ {
+ METER(stats.rehashes++);
+ removedCount = 0;
+ for (size_t i = 0; i < capacity(); ++i)
+ table[i].unsetCollision();
+
+ for (size_t i = 0; i < capacity();) {
+ Entry *src = &table[i];
+
+ if (!src->isLive() || src->hasCollision()) {
+ ++i;
+ continue;
+ }
+
+ HashNumber keyHash = src->getKeyHash();
+ HashNumber h1 = hash1(keyHash);
+ DoubleHash dh = hash2(keyHash);
+ Entry *tgt = &table[h1];
+ while (true) {
+ if (!tgt->hasCollision()) {
+ src->swap(tgt);
+ tgt->setCollision();
+ break;
+ }
+
+ h1 = applyDoubleHash(h1, dh);
+ tgt = &table[h1];
+ }
+ }
+
+ // TODO: this algorithm leaves collision bits on *all* elements, even if
+ // they are on no collision path. We have the option of setting the
+ // collision bits correctly on a subsequent pass or skipping the rehash
+ // unless we are totally filled with tombstones: benchmark to find out
+ // which approach is best.
+ }
+
+ public:
+ void clear()
+ {
+ if (mozilla::IsPod<Entry>::value) {
+ memset(table, 0, sizeof(*table) * capacity());
+ } else {
+ uint32_t tableCapacity = capacity();
+ for (Entry *e = table, *end = table + tableCapacity; e < end; ++e)
+ e->clear();
+ }
+ removedCount = 0;
+ entryCount = 0;
+ mutationCount++;
+ }
+
+ void finish()
+ {
+ JS_ASSERT(!entered);
+
+ if (!table)
+ return;
+
+ destroyTable(*this, table, capacity());
+ table = NULL;
+ gen++;
+ entryCount = 0;
+ removedCount = 0;
+ mutationCount++;
+ }
+
+ Range all() const
+ {
+ JS_ASSERT(table);
+ return Range(table, table + capacity());
+ }
+
+ bool empty() const
+ {
+ JS_ASSERT(table);
+ return !entryCount;
+ }
+
+ uint32_t count() const
+ {
+ JS_ASSERT(table);
+ return entryCount;
+ }
+
+ uint32_t capacity() const
+ {
+ JS_ASSERT(table);
+ return JS_BIT(sHashBits - hashShift);
+ }
+
+ uint32_t generation() const
+ {
+ JS_ASSERT(table);
+ return gen;
+ }
+
+ size_t sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf) const
+ {
+ return mallocSizeOf(table);
+ }
+
+ size_t sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const
+ {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ Ptr lookup(const Lookup &l) const
+ {
+ ReentrancyGuard g(*this);
+ HashNumber keyHash = prepareHash(l);
+ return Ptr(lookup(l, keyHash, 0));
+ }
+
+ Ptr readonlyThreadsafeLookup(const Lookup &l) const
+ {
+ HashNumber keyHash = prepareHash(l);
+ return Ptr(lookup(l, keyHash, 0));
+ }
+
+ AddPtr lookupForAdd(const Lookup &l) const
+ {
+ ReentrancyGuard g(*this);
+ HashNumber keyHash = prepareHash(l);
+ Entry &entry = lookup(l, keyHash, sCollisionBit);
+ AddPtr p(entry, keyHash);
+ p.mutationCount = mutationCount;
+ return p;
+ }
+
+ template <class U>
+ bool add(AddPtr &p, const U &rhs)
+ {
+ ReentrancyGuard g(*this);
+ JS_ASSERT(mutationCount == p.mutationCount);
+ JS_ASSERT(table);
+ JS_ASSERT(!p.found());
+ JS_ASSERT(!(p.keyHash & sCollisionBit));
+
+ // Changing an entry from removed to live does not affect whether we
+ // are overloaded and can be handled separately.
+ if (p.entry_->isRemoved()) {
+ METER(stats.addOverRemoved++);
+ removedCount--;
+ p.keyHash |= sCollisionBit;
+ } else {
+ // Preserve the validity of |p.entry_|.
+ RebuildStatus status = checkOverloaded();
+ if (status == RehashFailed)
+ return false;
+ if (status == Rehashed)
+ p.entry_ = &findFreeEntry(p.keyHash);
+ }
+
+ p.entry_->setLive(p.keyHash, rhs);
+ entryCount++;
+ mutationCount++;
+ return true;
+ }
+
+ template <class U>
+ void putNewInfallible(const Lookup &l, const U &u)
+ {
+ JS_ASSERT(table);
+
+ HashNumber keyHash = prepareHash(l);
+ Entry *entry = &findFreeEntry(keyHash);
+
+ if (entry->isRemoved()) {
+ METER(stats.addOverRemoved++);
+ removedCount--;
+ keyHash |= sCollisionBit;
+ }
+
+ entry->setLive(keyHash, u);
+ entryCount++;
+ mutationCount++;
+ }
+
+ template <class U>
+ bool putNew(const Lookup &l, const U &u)
+ {
+ if (checkOverloaded() == RehashFailed)
+ return false;
+
+ putNewInfallible(l, u);
+ return true;
+ }
+
+ template <class U>
+ bool relookupOrAdd(AddPtr& p, const Lookup &l, const U &u)
+ {
+ p.mutationCount = mutationCount;
+ {
+ ReentrancyGuard g(*this);
+ p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
+ }
+ return p.found() || add(p, u);
+ }
+
+ void remove(Ptr p)
+ {
+ JS_ASSERT(table);
+ ReentrancyGuard g(*this);
+ JS_ASSERT(p.found());
+ remove(*p.entry_);
+ checkUnderloaded();
+ }
+
+#undef METER
+};
+
+} // namespace detail
+} // namespace js
+
+#endif /* js_HashTable_h */
diff --git a/js/public/HeapAPI.h b/js/public/HeapAPI.h
new file mode 100644
index 0000000..37cadfa
--- /dev/null
+++ b/js/public/HeapAPI.h
@@ -0,0 +1,145 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_HeapAPI_h
+#define js_HeapAPI_h
+
+#include "jspubtd.h"
+
+/* These values are private to the JS engine. */
+namespace js {
+namespace gc {
+
+const size_t ArenaShift = 12;
+const size_t ArenaSize = size_t(1) << ArenaShift;
+const size_t ArenaMask = ArenaSize - 1;
+
+const size_t ChunkShift = 20;
+const size_t ChunkSize = size_t(1) << ChunkShift;
+const size_t ChunkMask = ChunkSize - 1;
+
+const size_t CellShift = 3;
+const size_t CellSize = size_t(1) << CellShift;
+const size_t CellMask = CellSize - 1;
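+
+/*
+ * For concreteness: with the shifts above, a Cell is 8 bytes, an Arena is
+ * 4 KiB, and a Chunk is 1 MiB; each mask extracts an address's offset within
+ * the corresponding unit.
+ */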
+
+/* These are magic constants derived from actual offsets in gc/Heap.h. */
+const size_t ChunkMarkBitmapOffset = 1032368;
+const size_t ChunkMarkBitmapBits = 129024;
+const size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);
+
+/*
+ * Live objects are marked black. How many additional colors are available
+ * depends on the size of the GCThing. Objects marked gray are eligible for
+ * cycle collection.
+ */
+static const uint32_t BLACK = 0;
+static const uint32_t GRAY = 1;
+
+} /* namespace gc */
+} /* namespace js */
+
+namespace JS {
+struct Zone;
+} /* namespace JS */
+
+namespace JS {
+namespace shadow {
+
+struct ArenaHeader
+{
+ js::Zone *zone;
+};
+
+struct Zone
+{
+ bool needsBarrier_;
+
+ Zone() : needsBarrier_(false) {}
+};
+
+} /* namespace shadow */
+} /* namespace JS */
+
+namespace js {
+namespace gc {
+
+static JS_ALWAYS_INLINE uintptr_t *
+GetGCThingMarkBitmap(const void *thing)
+{
+ uintptr_t addr = uintptr_t(thing);
+ addr &= ~js::gc::ChunkMask;
+ addr |= js::gc::ChunkMarkBitmapOffset;
+ return reinterpret_cast<uintptr_t *>(addr);
+}
+
+static JS_ALWAYS_INLINE JS::shadow::Runtime *
+GetGCThingRuntime(const void *thing)
+{
+ uintptr_t addr = uintptr_t(thing);
+ addr &= ~js::gc::ChunkMask;
+ addr |= js::gc::ChunkRuntimeOffset;
+ return *reinterpret_cast<JS::shadow::Runtime **>(addr);
+}
+
+static JS_ALWAYS_INLINE void
+GetGCThingMarkWordAndMask(const void *thing, uint32_t color,
+ uintptr_t **wordp, uintptr_t *maskp)
+{
+ uintptr_t addr = uintptr_t(thing);
+ size_t bit = (addr & js::gc::ChunkMask) / js::gc::CellSize + color;
+ JS_ASSERT(bit < js::gc::ChunkMarkBitmapBits);
+ uintptr_t *bitmap = GetGCThingMarkBitmap(thing);
+ *maskp = uintptr_t(1) << (bit % JS_BITS_PER_WORD);
+ *wordp = &bitmap[bit / JS_BITS_PER_WORD];
+}
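+
+/*
+ * A worked example (assuming JS_BITS_PER_WORD == 64): for a cell at chunk
+ * offset 0x120 queried with color BLACK, bit = 0x120 / CellSize = 36, so
+ * *maskp is uintptr_t(1) << 36 and *wordp points at bitmap[0].
+ */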
+
+static JS_ALWAYS_INLINE JS::shadow::ArenaHeader *
+GetGCThingArena(void *thing)
+{
+ uintptr_t addr = uintptr_t(thing);
+ addr &= ~js::gc::ArenaMask;
+ return reinterpret_cast<JS::shadow::ArenaHeader *>(addr);
+}
+
+} /* namespace gc */
+
+} /* namespace js */
+
+namespace JS {
+
+static JS_ALWAYS_INLINE Zone *
+GetGCThingZone(void *thing)
+{
+ JS_ASSERT(thing);
+ return js::gc::GetGCThingArena(thing)->zone;
+}
+
+static JS_ALWAYS_INLINE Zone *
+GetObjectZone(JSObject *obj)
+{
+ return GetGCThingZone(obj);
+}
+
+static JS_ALWAYS_INLINE bool
+GCThingIsMarkedGray(void *thing)
+{
+ uintptr_t *word, mask;
+ js::gc::GetGCThingMarkWordAndMask(thing, js::gc::GRAY, &word, &mask);
+ return *word & mask;
+}
+
+static JS_ALWAYS_INLINE bool
+IsIncrementalBarrierNeededOnGCThing(shadow::Runtime *rt, void *thing, JSGCTraceKind kind)
+{
+ if (!rt->needsBarrier_)
+ return false;
+ js::Zone *zone = GetGCThingZone(thing);
+ return reinterpret_cast<shadow::Zone *>(zone)->needsBarrier_;
+}
+
+} /* namespace JS */
+
+#endif /* js_HeapAPI_h */
diff --git a/js/public/LegacyIntTypes.h b/js/public/LegacyIntTypes.h
new file mode 100644
index 0000000..903f2e6
--- /dev/null
+++ b/js/public/LegacyIntTypes.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This header typedefs the old 'native' types to the new <stdint.h> types.
+ * These redefinitions are provided solely to allow JSAPI users to more easily
+ * transition to <stdint.h> types. They are not to be used in the JSAPI, and
+ * new JSAPI user code should not use them. This mapping file may eventually
+ * be removed from SpiderMonkey, so don't depend on it in the long run.
+ */
+
+/*
+ * BEWARE: Comity with other implementers of these types is not guaranteed.
+ * Indeed, if you use this header and third-party code defining these
+ * types, *expect* to encounter either compile errors or link errors,
+ * depending how these types are used and on the order of inclusion.
+ * It is safest to use only the JSAPI <stdint.h>-style types,
+ * customizing those types using MOZ_CUSTOM_STDINT_H if necessary.
+ */
+#ifndef js_LegacyIntTypes_h
+#define js_LegacyIntTypes_h
+
+#include "mozilla/StandardInteger.h"
+
+#include "js-config.h"
+
+typedef uint8_t uint8;
+typedef uint16_t uint16;
+typedef uint32_t uint32;
+typedef uint64_t uint64;
+
+/*
+ * On AIX 4.3, sys/inttypes.h (which is included by sys/types.h, a very
+ * common header file) defines the types int8, int16, int32, and int64.
+ * So we don't define these four types here to avoid conflicts in case
+ * the code also includes sys/types.h.
+ */
+#if defined(AIX) && defined(HAVE_SYS_INTTYPES_H)
+#include <sys/inttypes.h>
+#else
+typedef int8_t int8;
+typedef int16_t int16;
+typedef int32_t int32;
+typedef int64_t int64;
+#endif /* AIX && HAVE_SYS_INTTYPES_H */
+
+typedef uint8_t JSUint8;
+typedef uint16_t JSUint16;
+typedef uint32_t JSUint32;
+typedef uint64_t JSUint64;
+
+typedef int8_t JSInt8;
+typedef int16_t JSInt16;
+typedef int32_t JSInt32;
+typedef int64_t JSInt64;
+
+#endif /* js_LegacyIntTypes_h */
diff --git a/js/public/MemoryMetrics.h b/js/public/MemoryMetrics.h
new file mode 100644
index 0000000..7fdf0d3
--- /dev/null
+++ b/js/public/MemoryMetrics.h
@@ -0,0 +1,460 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_MemoryMetrics_h
+#define js_MemoryMetrics_h
+
+// These declarations are not within jsapi.h because they are highly likely to
+// change in the future. Depend on them at your own risk.
+
+#include <string.h>
+
+#include "jsalloc.h"
+#include "jspubtd.h"
+
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+class nsISupports; // This is needed for ObjectPrivateVisitor.
+
+namespace js {
+
+// In memory reporting, we have the concept of "sundries", line items which are too
+// small to be worth reporting individually. Under some circumstances, a memory
+// reporter gets tossed into the sundries bucket if it's smaller than
+// MemoryReportingSundriesThreshold() bytes.
+//
+// We need to define this value here, rather than in the code which actually
+// generates the memory reports, because HugeStringInfo uses this value.
+JS_FRIEND_API(size_t) MemoryReportingSundriesThreshold();
+
+} // namespace js
+
+namespace JS {
+
+// Data for tracking memory usage of things hanging off objects.
+struct ObjectsExtraSizes
+{
+ size_t slots;
+ size_t elementsNonAsmJS;
+ size_t elementsAsmJSHeap;
+ size_t elementsAsmJSNonHeap;
+ size_t argumentsData;
+ size_t regExpStatics;
+ size_t propertyIteratorData;
+ size_t ctypesData;
+ size_t private_; // The '_' suffix is required because |private| is a keyword.
+ // Note that this field is measured separately from the others.
+
+ ObjectsExtraSizes() { memset(this, 0, sizeof(ObjectsExtraSizes)); }
+
+ void add(ObjectsExtraSizes &sizes) {
+ this->slots += sizes.slots;
+ this->elementsNonAsmJS += sizes.elementsNonAsmJS;
+ this->elementsAsmJSHeap += sizes.elementsAsmJSHeap;
+ this->elementsAsmJSNonHeap += sizes.elementsAsmJSNonHeap;
+ this->argumentsData += sizes.argumentsData;
+ this->regExpStatics += sizes.regExpStatics;
+ this->propertyIteratorData += sizes.propertyIteratorData;
+ this->ctypesData += sizes.ctypesData;
+ this->private_ += sizes.private_;
+ }
+};
+
+// Data for tracking analysis/inference memory usage.
+struct TypeInferenceSizes
+{
+ size_t typeScripts;
+ size_t typeResults;
+ size_t analysisPool;
+ size_t pendingArrays;
+ size_t allocationSiteTables;
+ size_t arrayTypeTables;
+ size_t objectTypeTables;
+
+ TypeInferenceSizes() { memset(this, 0, sizeof(TypeInferenceSizes)); }
+
+ void add(TypeInferenceSizes &sizes) {
+ this->typeScripts += sizes.typeScripts;
+ this->typeResults += sizes.typeResults;
+ this->analysisPool += sizes.analysisPool;
+ this->pendingArrays += sizes.pendingArrays;
+ this->allocationSiteTables += sizes.allocationSiteTables;
+ this->arrayTypeTables += sizes.arrayTypeTables;
+ this->objectTypeTables += sizes.objectTypeTables;
+ }
+};
+
+// Data for tracking JIT-code memory usage.
+struct CodeSizes
+{
+ size_t ion;
+ size_t asmJS;
+ size_t baseline;
+ size_t regexp;
+ size_t other;
+ size_t unused;
+
+ CodeSizes() { memset(this, 0, sizeof(CodeSizes)); }
+};
+
+// Holds data about a huge string (one which uses more than
+// HugeStringInfo::MinSize bytes of memory), so we can report it individually.
+struct HugeStringInfo
+{
+ HugeStringInfo() : length(0), size(0) { memset(&buffer, 0, sizeof(buffer)); }
+
+ // A string needs to take up this many bytes of storage before we consider
+ // it to be "huge".
+ static size_t MinSize() {
+ return js::MemoryReportingSundriesThreshold();
+ }
+
+ // A string's size in memory is not necessarily equal to twice its length
+ // because the allocator and the JS engine both may round up.
+ size_t length;
+ size_t size;
+
+    // We record the first 32 chars of the escaped string here. (We escape the
+    // string so we can use a char[] instead of a jschar[] here.)
+ char buffer[32];
+};
+
+// These measurements relate directly to the JSRuntime, and not to
+// compartments within it.
+struct RuntimeSizes
+{
+ RuntimeSizes() { memset(this, 0, sizeof(RuntimeSizes)); }
+
+ size_t object;
+ size_t atomsTable;
+ size_t contexts;
+ size_t dtoa;
+ size_t temporary;
+ size_t regexpData;
+ size_t interpreterStack;
+ size_t gcMarker;
+ size_t mathCache;
+ size_t scriptData;
+ size_t scriptSources;
+
+ CodeSizes code;
+};
+
+struct ZoneStats
+{
+ ZoneStats()
+ : extra(NULL),
+ gcHeapArenaAdmin(0),
+ gcHeapUnusedGcThings(0),
+ gcHeapStringsNormal(0),
+ gcHeapStringsShort(0),
+ gcHeapLazyScripts(0),
+ gcHeapTypeObjects(0),
+ gcHeapIonCodes(0),
+ stringCharsNonHuge(0),
+ lazyScripts(0),
+ typeObjects(0),
+ typePool(0),
+ hugeStrings()
+ {}
+
+ ZoneStats(const ZoneStats &other)
+ : extra(other.extra),
+ gcHeapArenaAdmin(other.gcHeapArenaAdmin),
+ gcHeapUnusedGcThings(other.gcHeapUnusedGcThings),
+ gcHeapStringsNormal(other.gcHeapStringsNormal),
+ gcHeapStringsShort(other.gcHeapStringsShort),
+ gcHeapLazyScripts(other.gcHeapLazyScripts),
+ gcHeapTypeObjects(other.gcHeapTypeObjects),
+ gcHeapIonCodes(other.gcHeapIonCodes),
+ stringCharsNonHuge(other.stringCharsNonHuge),
+ lazyScripts(other.lazyScripts),
+ typeObjects(other.typeObjects),
+ typePool(other.typePool),
+ hugeStrings()
+ {
+ hugeStrings.append(other.hugeStrings);
+ }
+
+ // Add other's numbers to this object's numbers.
+ void add(ZoneStats &other) {
+ #define ADD(x) this->x += other.x
+
+ ADD(gcHeapArenaAdmin);
+ ADD(gcHeapUnusedGcThings);
+
+ ADD(gcHeapStringsNormal);
+ ADD(gcHeapStringsShort);
+ ADD(gcHeapLazyScripts);
+ ADD(gcHeapTypeObjects);
+ ADD(gcHeapIonCodes);
+
+ ADD(stringCharsNonHuge);
+ ADD(lazyScripts);
+ ADD(typeObjects);
+ ADD(typePool);
+
+ #undef ADD
+
+ hugeStrings.append(other.hugeStrings);
+ }
+
+ // This field can be used by embedders.
+ void *extra;
+
+ size_t gcHeapArenaAdmin;
+ size_t gcHeapUnusedGcThings;
+
+ size_t gcHeapStringsNormal;
+ size_t gcHeapStringsShort;
+
+ size_t gcHeapLazyScripts;
+ size_t gcHeapTypeObjects;
+ size_t gcHeapIonCodes;
+
+ size_t stringCharsNonHuge;
+ size_t lazyScripts;
+ size_t typeObjects;
+ size_t typePool;
+
+ js::Vector<HugeStringInfo, 0, js::SystemAllocPolicy> hugeStrings;
+
+ // The size of all the live things in the GC heap that don't belong to any
+ // compartment.
+ size_t GCHeapThingsSize();
+};
+
+struct CompartmentStats
+{
+ CompartmentStats()
+ : extra(NULL),
+ gcHeapObjectsOrdinary(0),
+ gcHeapObjectsFunction(0),
+ gcHeapObjectsDenseArray(0),
+ gcHeapObjectsSlowArray(0),
+ gcHeapObjectsCrossCompartmentWrapper(0),
+ gcHeapShapesTreeGlobalParented(0),
+ gcHeapShapesTreeNonGlobalParented(0),
+ gcHeapShapesDict(0),
+ gcHeapShapesBase(0),
+ gcHeapScripts(0),
+ objectsExtra(),
+ shapesExtraTreeTables(0),
+ shapesExtraDictTables(0),
+ shapesExtraTreeShapeKids(0),
+ shapesCompartmentTables(0),
+ scriptData(0),
+ baselineData(0),
+ baselineStubsFallback(0),
+ baselineStubsOptimized(0),
+ ionData(0),
+ compartmentObject(0),
+ crossCompartmentWrappersTable(0),
+ regexpCompartment(0),
+ debuggeesSet(0),
+ typeInference()
+ {}
+
+ CompartmentStats(const CompartmentStats &other)
+ : extra(other.extra),
+ gcHeapObjectsOrdinary(other.gcHeapObjectsOrdinary),
+ gcHeapObjectsFunction(other.gcHeapObjectsFunction),
+ gcHeapObjectsDenseArray(other.gcHeapObjectsDenseArray),
+ gcHeapObjectsSlowArray(other.gcHeapObjectsSlowArray),
+ gcHeapObjectsCrossCompartmentWrapper(other.gcHeapObjectsCrossCompartmentWrapper),
+ gcHeapShapesTreeGlobalParented(other.gcHeapShapesTreeGlobalParented),
+ gcHeapShapesTreeNonGlobalParented(other.gcHeapShapesTreeNonGlobalParented),
+ gcHeapShapesDict(other.gcHeapShapesDict),
+ gcHeapShapesBase(other.gcHeapShapesBase),
+ gcHeapScripts(other.gcHeapScripts),
+ objectsExtra(other.objectsExtra),
+ shapesExtraTreeTables(other.shapesExtraTreeTables),
+ shapesExtraDictTables(other.shapesExtraDictTables),
+ shapesExtraTreeShapeKids(other.shapesExtraTreeShapeKids),
+ shapesCompartmentTables(other.shapesCompartmentTables),
+ scriptData(other.scriptData),
+ baselineData(other.baselineData),
+ baselineStubsFallback(other.baselineStubsFallback),
+ baselineStubsOptimized(other.baselineStubsOptimized),
+ ionData(other.ionData),
+ compartmentObject(other.compartmentObject),
+ crossCompartmentWrappersTable(other.crossCompartmentWrappersTable),
+ regexpCompartment(other.regexpCompartment),
+ debuggeesSet(other.debuggeesSet),
+ typeInference(other.typeInference)
+ {
+ }
+
+ // This field can be used by embedders.
+ void *extra;
+
+ // If you add a new number, remember to update the constructors, add(), and
+ // maybe gcHeapThingsSize()!
+ size_t gcHeapObjectsOrdinary;
+ size_t gcHeapObjectsFunction;
+ size_t gcHeapObjectsDenseArray;
+ size_t gcHeapObjectsSlowArray;
+ size_t gcHeapObjectsCrossCompartmentWrapper;
+ size_t gcHeapShapesTreeGlobalParented;
+ size_t gcHeapShapesTreeNonGlobalParented;
+ size_t gcHeapShapesDict;
+ size_t gcHeapShapesBase;
+ size_t gcHeapScripts;
+ ObjectsExtraSizes objectsExtra;
+
+ size_t shapesExtraTreeTables;
+ size_t shapesExtraDictTables;
+ size_t shapesExtraTreeShapeKids;
+ size_t shapesCompartmentTables;
+ size_t scriptData;
+ size_t baselineData;
+ size_t baselineStubsFallback;
+ size_t baselineStubsOptimized;
+ size_t ionData;
+ size_t compartmentObject;
+ size_t crossCompartmentWrappersTable;
+ size_t regexpCompartment;
+ size_t debuggeesSet;
+
+ TypeInferenceSizes typeInference;
+
+ // Add cStats's numbers to this object's numbers.
+ void add(CompartmentStats &cStats) {
+ #define ADD(x) this->x += cStats.x
+
+ ADD(gcHeapObjectsOrdinary);
+ ADD(gcHeapObjectsFunction);
+ ADD(gcHeapObjectsDenseArray);
+ ADD(gcHeapObjectsSlowArray);
+ ADD(gcHeapObjectsCrossCompartmentWrapper);
+ ADD(gcHeapShapesTreeGlobalParented);
+ ADD(gcHeapShapesTreeNonGlobalParented);
+ ADD(gcHeapShapesDict);
+ ADD(gcHeapShapesBase);
+ ADD(gcHeapScripts);
+ objectsExtra.add(cStats.objectsExtra);
+
+ ADD(shapesExtraTreeTables);
+ ADD(shapesExtraDictTables);
+ ADD(shapesExtraTreeShapeKids);
+ ADD(shapesCompartmentTables);
+ ADD(scriptData);
+ ADD(baselineData);
+ ADD(baselineStubsFallback);
+ ADD(baselineStubsOptimized);
+ ADD(ionData);
+ ADD(compartmentObject);
+ ADD(crossCompartmentWrappersTable);
+ ADD(regexpCompartment);
+ ADD(debuggeesSet);
+
+ #undef ADD
+
+ typeInference.add(cStats.typeInference);
+ }
+
+ // The size of all the live things in the GC heap.
+ size_t GCHeapThingsSize();
+};
+
+struct RuntimeStats
+{
+ RuntimeStats(JSMallocSizeOfFun mallocSizeOf)
+ : runtime(),
+ gcHeapChunkTotal(0),
+ gcHeapDecommittedArenas(0),
+ gcHeapUnusedChunks(0),
+ gcHeapUnusedArenas(0),
+ gcHeapUnusedGcThings(0),
+ gcHeapChunkAdmin(0),
+ gcHeapGcThings(0),
+ cTotals(),
+ zTotals(),
+ compartmentStatsVector(),
+ zoneStatsVector(),
+ currZoneStats(NULL),
+ mallocSizeOf_(mallocSizeOf)
+ {}
+
+ RuntimeSizes runtime;
+
+ // If you add a new number, remember to update the constructor!
+
+ // Here's a useful breakdown of the GC heap.
+ //
+ // - rtStats.gcHeapChunkTotal
+ // - decommitted bytes
+ // - rtStats.gcHeapDecommittedArenas (decommitted arenas in non-empty chunks)
+ // - unused bytes
+ // - rtStats.gcHeapUnusedChunks (empty chunks)
+ // - rtStats.gcHeapUnusedArenas (empty arenas within non-empty chunks)
+ // - rtStats.total.gcHeapUnusedGcThings (empty GC thing slots within non-empty arenas)
+ // - used bytes
+ // - rtStats.gcHeapChunkAdmin
+ // - rtStats.total.gcHeapArenaAdmin
+ // - rtStats.gcHeapGcThings (in-use GC things)
+ //
+ // It's possible that some arenas in empty chunks may be decommitted, but
+ // we don't count those under rtStats.gcHeapDecommittedArenas because (a)
+ // it's rare, and (b) this means that rtStats.gcHeapUnusedChunks is a
+ // multiple of the chunk size, which is good.
+
+ size_t gcHeapChunkTotal;
+ size_t gcHeapDecommittedArenas;
+ size_t gcHeapUnusedChunks;
+ size_t gcHeapUnusedArenas;
+ size_t gcHeapUnusedGcThings;
+ size_t gcHeapChunkAdmin;
+ size_t gcHeapGcThings;
+
+ // The sum of all compartment's measurements.
+ CompartmentStats cTotals;
+ ZoneStats zTotals;
+
+ js::Vector<CompartmentStats, 0, js::SystemAllocPolicy> compartmentStatsVector;
+ js::Vector<ZoneStats, 0, js::SystemAllocPolicy> zoneStatsVector;
+
+ ZoneStats *currZoneStats;
+
+ JSMallocSizeOfFun mallocSizeOf_;
+
+ virtual void initExtraCompartmentStats(JSCompartment *c, CompartmentStats *cstats) = 0;
+ virtual void initExtraZoneStats(JS::Zone *zone, ZoneStats *zstats) = 0;
+};
+
+class ObjectPrivateVisitor
+{
+ public:
+ // Within CollectRuntimeStats, this method is called for each JS object
+ // that has an nsISupports pointer.
+ virtual size_t sizeOfIncludingThis(nsISupports *aSupports) = 0;
+
+ // A callback that gets a JSObject's nsISupports pointer, if it has one.
+ // Note: this function does *not* addref |iface|.
+ typedef JSBool(*GetISupportsFun)(JSObject *obj, nsISupports **iface);
+ GetISupportsFun getISupports_;
+
+ ObjectPrivateVisitor(GetISupportsFun getISupports)
+ : getISupports_(getISupports)
+ {}
+};
+
+extern JS_PUBLIC_API(bool)
+CollectRuntimeStats(JSRuntime *rt, RuntimeStats *rtStats, ObjectPrivateVisitor *opv);
+
+extern JS_PUBLIC_API(size_t)
+SystemCompartmentCount(JSRuntime *rt);
+
+extern JS_PUBLIC_API(size_t)
+UserCompartmentCount(JSRuntime *rt);
+
+extern JS_PUBLIC_API(size_t)
+PeakSizeOfTemporary(const JSRuntime *rt);
+
+} // namespace JS
+
+#endif /* js_MemoryMetrics_h */
diff --git a/js/public/PropertyKey.h b/js/public/PropertyKey.h
new file mode 100644
index 0000000..c949db1
--- /dev/null
+++ b/js/public/PropertyKey.h
@@ -0,0 +1,98 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS::PropertyKey implementation. */
+
+#ifndef js_PropertyKey_h
+#define js_PropertyKey_h
+
+#include "mozilla/Attributes.h"
+
+#include "js/Value.h"
+
+struct JSContext;
+
+namespace JS {
+
+class PropertyKey;
+
+namespace detail {
+
+extern JS_PUBLIC_API(bool)
+ToPropertyKeySlow(JSContext *cx, HandleValue v, PropertyKey *key);
+
+} // namespace detail
+
+/*
+ * A PropertyKey is a key used to access some property on an object. It is a
+ * natural way to represent a property accessed using a JavaScript value.
+ *
+ * PropertyKey can represent indexes, named properties, and ES6 symbols. The
+ * latter aren't implemented in SpiderMonkey yet, but PropertyKey carves out
+ * space for them.
+ */
+class PropertyKey
+{
+ Value v;
+ friend JS_PUBLIC_API(bool) detail::ToPropertyKeySlow(JSContext *cx, HandleValue v, PropertyKey *key);
+
+ public:
+ explicit PropertyKey(uint32_t index) : v(PrivateUint32Value(index)) {}
+
+ /*
+ * An index is a string property name whose characters exactly spell out an
+ * unsigned 32-bit integer in decimal: "0", "1", "2", ...., "4294967294",
+ * "4294967295".
+ */
+ bool isIndex(uint32_t *index) {
+        // The implementation here assumes that private uint32_t values are
+        // stored using the int32_t representation. This is purely an
+        // implementation detail: embedders must not rely upon this!
+ if (!v.isInt32())
+ return false;
+ *index = v.toPrivateUint32();
+ return true;
+ }
+
+ /*
+ * A name is a string property name which is *not* an index. Note that by
+ * the ECMAScript language grammar, any dotted property access |obj.prop|
+ * will access a named property.
+ */
+ bool isName(JSString **str) {
+ uint32_t dummy;
+ if (isIndex(&dummy))
+ return false;
+ *str = v.toString();
+ return true;
+ }
+
+ /*
+ * A symbol is a property name that's a Symbol, a particular kind of object
+ * in ES6. It is the only kind of property name that's not a string.
+ *
+ * SpiderMonkey doesn't yet implement symbols, but we're carving out API
+ * space for them in advance.
+ */
+ bool isSymbol() {
+ return false;
+ }
+};
+
+inline bool
+ToPropertyKey(JSContext *cx, HandleValue v, PropertyKey *key)
+{
+ if (v.isInt32() && v.toInt32() >= 0) {
+ *key = PropertyKey(uint32_t(v.toInt32()));
+ return true;
+ }
+
+ return detail::ToPropertyKeySlow(cx, v, key);
+}
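+
+/*
+ * A usage sketch (hypothetical; |cx| is a JSContext and |v| a rooted value):
+ *
+ *   JS::PropertyKey key(0);
+ *   if (!JS::ToPropertyKey(cx, v, &key))
+ *       return false;
+ *   uint32_t index;
+ *   if (key.isIndex(&index))
+ *       ;  // take an indexed-property fast path
+ */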
+
+} // namespace JS
+
+#endif /* js_PropertyKey_h */
diff --git a/js/public/RequiredDefines.h b/js/public/RequiredDefines.h
new file mode 100644
index 0000000..6af9ca8
--- /dev/null
+++ b/js/public/RequiredDefines.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Various #defines required to build SpiderMonkey. Embedders should add this
+ * file to the start of the command line via -include or a similar mechanism,
+ * or SpiderMonkey public headers may not work correctly.
+ */
+
+#ifndef js_RequiredDefines_h
+#define js_RequiredDefines_h
+
+/*
+ * The C99 standard, which defines the limit macros (UINT32_MAX, for example),
+ * says that C++ implementations should define these macros only when
+ * __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
+ */
+#define __STDC_LIMIT_MACROS
+
+#endif /* js_RequiredDefines_h */
diff --git a/js/public/RootingAPI.h b/js/public/RootingAPI.h
new file mode 100644
index 0000000..89223d1
--- /dev/null
+++ b/js/public/RootingAPI.h
@@ -0,0 +1,957 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_RootingAPI_h
+#define js_RootingAPI_h
+
+#include "mozilla/GuardObjects.h"
+#include "mozilla/TypeTraits.h"
+
+#include "js/Utility.h"
+#include "js/TemplateLib.h"
+
+#include "jspubtd.h"
+
+/*
+ * Moving GC Stack Rooting
+ *
+ * A moving GC may change the physical location of GC allocated things, even
+ * when they are rooted, updating all pointers to the thing to refer to its new
+ * location. The GC must therefore know about all live pointers to a thing,
+ * not just one of them, in order to behave correctly.
+ *
+ * The |Rooted| and |Handle| classes below are used to root stack locations
+ * whose value may be held live across a call that can trigger GC. For a
+ * code fragment such as:
+ *
+ * JSObject *obj = NewObject(cx);
+ * DoSomething(cx);
+ * ... = obj->lastProperty();
+ *
+ * If |DoSomething()| can trigger a GC, the stack location of |obj| must be
+ * rooted to ensure that the GC does not move the JSObject referred to by
+ * |obj| without updating |obj|'s location itself. This rooting must happen
+ * regardless of whether there are other roots which ensure that the object
+ * itself will not be collected.
+ *
+ * If |DoSomething()| cannot trigger a GC, and the same holds for all other
+ * calls made between |obj|'s definitions and its last uses, then no rooting
+ * is required.
+ *
+ * SpiderMonkey can trigger a GC at almost any time and in ways that are not
+ * always clear. For example, the following innocuous-looking actions can
+ * cause a GC: allocation of any new GC thing; JSObject::hasProperty;
+ * JS_ReportError and friends; and ToNumber, among many others. The following
+ * dangerous-looking actions cannot trigger a GC: js_malloc, cx->malloc_,
+ * rt->malloc_, and friends and JS_ReportOutOfMemory.
+ *
+ * The following family of three classes will exactly root a stack location.
+ * Incorrect usage of these classes will result in a compile error in almost
+ * all cases. Therefore, it is very hard to be incorrectly rooted if you use
+ * these classes exclusively. These classes are all templated on the type T of
+ * the value being rooted.
+ *
+ * - Rooted<T> declares a variable of type T, whose value is always rooted.
+ * Rooted<T> may be automatically coerced to a Handle<T>, below. Rooted<T>
+ * should be used whenever a local variable's value may be held live across a
+ * call which can trigger a GC.
+ *
+ * - Handle<T> is a const reference to a Rooted<T>. Functions which take GC
+ * things or values as arguments and need to root those arguments should
+ * generally use handles for those arguments and avoid any explicit rooting.
+ * This has two benefits. First, when several such functions call each other
+ * then redundant rooting of multiple copies of the GC thing can be avoided.
+ * Second, if the caller does not pass a rooted value a compile error will be
+ * generated, which is quicker and easier to fix than when relying on a
+ * separate rooting analysis.
+ *
+ * - MutableHandle<T> is a non-const reference to Rooted<T>. It is used in the
+ * same way as Handle<T> and includes a |set(const T &v)| method to allow
+ * updating the value of the referenced Rooted<T>. A MutableHandle<T> can be
+ * created from a Rooted<T> by using |Rooted<T>::operator&()|.
+ *
+ * In some cases the small performance overhead of exact rooting (measured to
+ * be a few nanoseconds on desktop) is too much. In these cases, try the
+ * following:
+ *
+ * - Move all Rooted<T> above inner loops: this allows you to re-use the root
+ * on each iteration of the loop.
+ *
+ * - Pass Handle<T> through your hot call stack to avoid re-rooting costs at
+ * every invocation.
+ *
+ * The following diagram explains the list of supported, implicit type
+ * conversions between classes of this family:
+ *
+ * Rooted<T> ----> Handle<T>
+ * | ^
+ * | |
+ * | |
+ * +---> MutableHandle<T>
+ * (via &)
+ *
+ * All of these types have an implicit conversion to raw pointers.
+ */
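+
+/*
+ * For example (a sketch; |NewObject|, |DoSomething| and |Use| are
+ * hypothetical):
+ *
+ *   bool f(JSContext *cx) {
+ *       JS::Rooted<JSObject*> obj(cx, NewObject(cx));
+ *       DoSomething(cx);      // may GC; the GC updates |obj| if the object moves
+ *       return Use(cx, obj);  // Rooted<JSObject*> coerces to Handle<JSObject*>
+ *   }
+ */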
+
+namespace js {
+
+class Module;
+class ScriptSourceObject;
+
+template <typename T>
+struct GCMethods {};
+
+template <typename T>
+class RootedBase {};
+
+template <typename T>
+class HandleBase {};
+
+template <typename T>
+class MutableHandleBase {};
+
+template <typename T>
+class HeapBase {};
+
+/*
+ * js::NullPtr acts like a NULL pointer in contexts that require a Handle.
+ *
+ * Handle provides an implicit constructor for js::NullPtr so that, given:
+ * foo(Handle<JSObject*> h);
+ * callers can simply write:
+ * foo(js::NullPtr());
+ * which avoids creating a Rooted<JSObject*> just to pass NULL.
+ *
+ * This is the SpiderMonkey internal variant. js::NullPtr should be used in
+ * preference to JS::NullPtr to avoid the GOT access required for JS_PUBLIC_API
+ * symbols.
+ */
+struct NullPtr
+{
+ static void * const constNullValue;
+};
+
+namespace gc {
+struct Cell;
+} /* namespace gc */
+
+} /* namespace js */
+
+namespace JS {
+
+template <typename T> class Rooted;
+
+template <typename T> class Handle;
+template <typename T> class MutableHandle;
+
+/* This is exposing internal state of the GC for inlining purposes. */
+JS_FRIEND_API(bool) isGCEnabled();
+
+#if defined(JS_DEBUG) && defined(JS_GC_ZEAL) && defined(JSGC_ROOT_ANALYSIS) && !defined(JS_THREADSAFE)
+extern void
+CheckStackRoots(JSContext *cx);
+#endif
+
+/*
+ * JS::NullPtr acts like a NULL pointer in contexts that require a Handle.
+ *
+ * Handle provides an implicit constructor for JS::NullPtr so that, given:
+ * foo(Handle<JSObject*> h);
+ * callers can simply write:
+ * foo(JS::NullPtr());
+ * which avoids creating a Rooted<JSObject*> just to pass NULL.
+ */
+struct JS_PUBLIC_API(NullPtr)
+{
+ static void * const constNullValue;
+};
+
+/*
+ * An encapsulated pointer class for heap based GC thing pointers.
+ *
+ * This implements post-barriers for GC thing pointers stored on the heap. It is
+ * designed to be used for all heap-based GC thing pointers outside the JS
+ * engine.
+ *
+ * The template parameter T must be a JS GC thing pointer, masked pointer, or
+ * possibly-pointer type, such as a JS::Value or jsid.
+ *
+ * The class must be used to declare data members of heap classes only.
+ * Stack-based GC thing pointers should use Rooted<T>.
+ *
+ * Write barriers are implemented by overloading the assignment operator.
+ * Assigning to a Heap<T> triggers the appropriate calls into the GC to notify
+ * it of the change.
+ */
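+
+/*
+ * For example (a sketch of a hypothetical embedder class):
+ *
+ *   class ObjectHolder {
+ *       JS::Heap<JSObject*> obj_;
+ *     public:
+ *       void setObject(JSObject *obj) { obj_ = obj; }  // barriered write
+ *       JSObject *object() const { return obj_; }
+ *   };
+ */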
+template <typename T>
+class Heap : public js::HeapBase<T>
+{
+ public:
+ Heap() {
+ MOZ_STATIC_ASSERT(sizeof(T) == sizeof(Heap<T>),
+ "Heap<T> must be binary compatible with T.");
+ init(js::GCMethods<T>::initial());
+ }
+ explicit Heap(T p) { init(p); }
+ explicit Heap(const Heap<T> &p) { init(p.ptr); }
+
+ ~Heap() {
+ if (js::GCMethods<T>::needsPostBarrier(ptr))
+ relocate();
+ }
+
+ bool operator!=(const T &other) const { return ptr != other; }
+ bool operator==(const T &other) const { return ptr == other; }
+
+ operator T() const { return ptr; }
+ T operator->() const { return ptr; }
+ const T *address() const { return &ptr; }
+ const T &get() const { return ptr; }
+
+ T *unsafeGet() { return &ptr; }
+
+ Heap<T> &operator=(T p) {
+ set(p);
+ return *this;
+ }
+
+ void set(T newPtr) {
+ JS_ASSERT(!js::GCMethods<T>::poisoned(newPtr));
+ if (js::GCMethods<T>::needsPostBarrier(newPtr)) {
+ ptr = newPtr;
+ post();
+ } else if (js::GCMethods<T>::needsPostBarrier(ptr)) {
+ relocate(); /* Called before overwriting ptr. */
+ ptr = newPtr;
+ } else {
+ ptr = newPtr;
+ }
+ }
+
+ private:
+ void init(T newPtr) {
+ JS_ASSERT(!js::GCMethods<T>::poisoned(newPtr));
+ ptr = newPtr;
+ if (js::GCMethods<T>::needsPostBarrier(ptr))
+ post();
+ }
+
+ void post() {
+#ifdef JSGC_GENERATIONAL
+ JS_ASSERT(js::GCMethods<T>::needsPostBarrier(ptr));
+ js::GCMethods<T>::postBarrier(&ptr);
+#endif
+ }
+
+ void relocate() {
+#ifdef JSGC_GENERATIONAL
+ js::GCMethods<T>::relocate(&ptr);
+#endif
+ }
+
+ T ptr;
+};
+
+/*
+ * Reference to a T that has been rooted elsewhere. This is most useful
+ * as a parameter type, which guarantees that the T lvalue is properly
+ * rooted. See "Moving GC Stack Rooting" above.
+ *
+ * If you want to add additional methods to Handle for a specific
+ * specialization, define a HandleBase<T> specialization containing them.
+ */
+template <typename T>
+class MOZ_NONHEAP_CLASS Handle : public js::HandleBase<T>
+{
+ friend class MutableHandle<T>;
+
+ public:
+ /* Creates a handle from a handle of a type convertible to T. */
+ template <typename S>
+ Handle(Handle<S> handle,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0)
+ {
+ MOZ_STATIC_ASSERT(sizeof(Handle<T>) == sizeof(T *),
+ "Handle must be binary compatible with T*.");
+ ptr = reinterpret_cast<const T *>(handle.address());
+ }
+
+ /* Create a handle for a NULL pointer. */
+ Handle(js::NullPtr) {
+ MOZ_STATIC_ASSERT(mozilla::IsPointer<T>::value,
+ "js::NullPtr overload not valid for non-pointer types");
+ ptr = reinterpret_cast<const T *>(&js::NullPtr::constNullValue);
+ }
+
+ /* Create a handle for a NULL pointer. */
+ Handle(JS::NullPtr) {
+ MOZ_STATIC_ASSERT(mozilla::IsPointer<T>::value,
+ "JS::NullPtr overload not valid for non-pointer types");
+ ptr = reinterpret_cast<const T *>(&JS::NullPtr::constNullValue);
+ }
+
+ Handle(MutableHandle<T> handle) {
+ ptr = handle.address();
+ }
+
+ Handle(const Heap<T> &heapPtr) {
+ ptr = heapPtr.address();
+ }
+
+ /*
+ * This may be called only if the location of the T is guaranteed
+ * to be marked (for some reason other than being a Rooted),
+ * e.g., if it is guaranteed to be reachable from an implicit root.
+ *
+ * Create a Handle from a raw location of a T.
+ */
+ static Handle fromMarkedLocation(const T *p) {
+ Handle h;
+ h.ptr = p;
+ return h;
+ }
+
+ /*
+ * Construct a handle from an explicitly rooted location. This is the
+ * normal way to create a handle, and normally happens implicitly.
+ */
+ template <typename S>
+ inline
+ Handle(const Rooted<S> &root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0);
+
+ /* Construct a read only handle from a mutable handle. */
+ template <typename S>
+ inline
+ Handle(MutableHandle<S> &root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0);
+
+ const T *address() const { return ptr; }
+ const T& get() const { return *ptr; }
+
+ /*
+ * Return a reference so passing a Handle<T> to something that
+ * takes a |const T&| is not a GC hazard.
+ */
+ operator const T&() const { return get(); }
+ T operator->() const { return get(); }
+
+ bool operator!=(const T &other) const { return *ptr != other; }
+ bool operator==(const T &other) const { return *ptr == other; }
+
+ private:
+ Handle() {}
+
+ const T *ptr;
+
+ template <typename S>
+ void operator=(S v) MOZ_DELETE;
+};
+
+typedef Handle<JSObject*> HandleObject;
+typedef Handle<js::Module*> HandleModule;
+typedef Handle<js::ScriptSourceObject *> HandleScriptSource;
+typedef Handle<JSFunction*> HandleFunction;
+typedef Handle<JSScript*> HandleScript;
+typedef Handle<JSString*> HandleString;
+typedef Handle<jsid> HandleId;
+typedef Handle<Value> HandleValue;
+
+/*
+ * Similar to a handle, but the underlying storage can be changed. This is
+ * useful for outparams.
+ *
+ * If you want to add additional methods to MutableHandle for a specific
+ * specialization, define a MutableHandleBase<T> specialization containing
+ * them.
+ */
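+
+/*
+ * For example (a sketch; |GetX| is a hypothetical function filling in its
+ * out-parameter):
+ *
+ *   bool GetX(JSContext *cx, JS::MutableHandleValue vp) {
+ *       vp.set(JS::Int32Value(42));
+ *       return true;
+ *   }
+ *
+ *   JS::Rooted<JS::Value> v(cx);
+ *   if (!GetX(cx, &v))  // |&v| creates a MutableHandle via Rooted's operator&
+ *       return false;
+ */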
+template <typename T>
+class MOZ_STACK_CLASS MutableHandle : public js::MutableHandleBase<T>
+{
+ public:
+ inline MutableHandle(Rooted<T> *root);
+
+ void set(T v) {
+ JS_ASSERT(!js::GCMethods<T>::poisoned(v));
+ *ptr = v;
+ }
+
+ /*
+ * This may be called only if the location of the T is guaranteed
+ * to be marked (for some reason other than being a Rooted),
+ * e.g., if it is guaranteed to be reachable from an implicit root.
+ *
+ * Create a MutableHandle from a raw location of a T.
+ */
+ static MutableHandle fromMarkedLocation(T *p) {
+ MutableHandle h;
+ h.ptr = p;
+ return h;
+ }
+
+ T *address() const { return ptr; }
+ T get() const { return *ptr; }
+
+ operator T() const { return get(); }
+ T operator->() const { return get(); }
+
+ private:
+ MutableHandle() {}
+
+ T *ptr;
+
+ template <typename S>
+ void operator=(S v) MOZ_DELETE;
+};
+
+typedef MutableHandle<JSObject*> MutableHandleObject;
+typedef MutableHandle<JSFunction*> MutableHandleFunction;
+typedef MutableHandle<JSScript*> MutableHandleScript;
+typedef MutableHandle<JSString*> MutableHandleString;
+typedef MutableHandle<jsid> MutableHandleId;
+typedef MutableHandle<Value> MutableHandleValue;
+
+#ifdef JSGC_GENERATIONAL
+JS_PUBLIC_API(void) HeapCellPostBarrier(js::gc::Cell **cellp);
+JS_PUBLIC_API(void) HeapCellRelocate(js::gc::Cell **cellp);
+#endif
+
+} /* namespace JS */
+
+namespace js {
+
+/*
+ * InternalHandle is a handle to an internal pointer into a gcthing. Use
+ * InternalHandle when you have a pointer to a direct field of a gcthing, or
+ * when you need a parameter type for something that *may* be a pointer to a
+ * direct field of a gcthing.
+ */
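+
+/*
+ * For example (a sketch; |MyThing| is a hypothetical gcthing with a
+ * |uint32_t count_| member, and |MayGC| may trigger a moving GC):
+ *
+ *   void read(JS::Handle<MyThing*> thing) {
+ *       InternalHandle<uint32_t*> count(thing, &thing->count_);
+ *       MayGC();
+ *       uint32_t c = *count;  // reads the field at its (possibly new) address
+ *   }
+ */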
+template <typename T>
+class InternalHandle {};
+
+template <typename T>
+class InternalHandle<T*>
+{
+ void * const *holder;
+ size_t offset;
+
+ public:
+ /*
+ * Create an InternalHandle using a Handle to the gcthing containing the
+ * field in question, and a pointer to the field.
+ */
+ template<typename H>
+ InternalHandle(const JS::Handle<H> &handle, T *field)
+ : holder((void**)handle.address()), offset(uintptr_t(field) - uintptr_t(handle.get()))
+ {}
+
+ /*
+ * Create an InternalHandle to a field within a Rooted<>.
+ */
+ template<typename R>
+ InternalHandle(const JS::Rooted<R> &root, T *field)
+ : holder((void**)root.address()), offset(uintptr_t(field) - uintptr_t(root.get()))
+ {}
+
+ T *get() const { return reinterpret_cast<T*>(uintptr_t(*holder) + offset); }
+
+ const T &operator*() const { return *get(); }
+ T *operator->() const { return get(); }
+
+ static InternalHandle<T*> fromMarkedLocation(T *fieldPtr) {
+ return InternalHandle(fieldPtr);
+ }
+
+ private:
+ /*
+ * Create an InternalHandle to something that is not a pointer to a
+ * gcthing, and so does not need to be rooted in the first place. Use these
+ * InternalHandles to pass pointers into functions that also need to accept
+ * regular InternalHandles to gcthing fields.
+ *
+ * Make this private to prevent accidental misuse; this is only for
+ * fromMarkedLocation().
+ */
+ InternalHandle(T *field)
+ : holder(reinterpret_cast<void * const *>(&js::NullPtr::constNullValue)),
+ offset(uintptr_t(field))
+ {}
+};
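+
+/*
+ * Sketch of the intent (MyThing and |field| are hypothetical): because get()
+ * re-derives the field's address from the rooted owner on every access, an
+ * InternalHandle stays valid even if a GC moves the owner:
+ *
+ *   JS::Rooted<MyThing *> owner(cx, thing);
+ *   InternalHandle<Field *> field(owner, &owner->field);
+ *   use(field->bits);   // recomputed as *holder + offset on each access
+ */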
+
+/*
+ * By default, pointers should use the inheritance hierarchy to find their
+ * ThingRootKind. Some pointer types are explicitly set in jspubtd.h so that
+ * Rooted<T> may be used without the class definition being available.
+ */
+template <typename T>
+struct RootKind<T *>
+{
+ static ThingRootKind rootKind() { return T::rootKind(); }
+};
+
+template <typename T>
+struct GCMethods<T *>
+{
+ static T *initial() { return NULL; }
+ static ThingRootKind kind() { return RootKind<T *>::rootKind(); }
+ static bool poisoned(T *v) { return JS::IsPoisonedPtr(v); }
+ static bool needsPostBarrier(T *v) { return v; }
+#ifdef JSGC_GENERATIONAL
+ static void postBarrier(T **vp) {
+ JS::HeapCellPostBarrier(reinterpret_cast<js::gc::Cell **>(vp));
+ }
+ static void relocate(T **vp) {
+ JS::HeapCellRelocate(reinterpret_cast<js::gc::Cell **>(vp));
+ }
+#endif
+};
+
+#if defined(JS_DEBUG) && defined(JS_THREADSAFE)
+/* This helper allows us to assert that Rooted<T> is scoped within a request. */
+extern JS_PUBLIC_API(bool)
+IsInRequest(JSContext *cx);
+#endif
+
+} /* namespace js */
+
+namespace JS {
+
+/*
+ * Local variable of type T whose value is always rooted. This is typically
+ * used for local variables, or for non-rooted values being passed to a
+ * function that requires a handle, e.g. Foo(Rooted<T>(cx, x)).
+ *
+ * If you want to add additional methods to Rooted for a specific
+ * specialization, define a RootedBase<T> specialization containing them.
+ */
+template <typename T>
+class MOZ_STACK_CLASS Rooted : public js::RootedBase<T>
+{
+ void init(JSContext *cxArg) {
+ MOZ_ASSERT(cxArg);
+#if defined(JS_DEBUG) && defined(JS_THREADSAFE)
+ MOZ_ASSERT(js::IsInRequest(cxArg));
+#endif
+#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
+ js::ContextFriendFields *cx = js::ContextFriendFields::get(cxArg);
+ commonInit(cx->thingGCRooters);
+#endif
+ }
+
+ void init(js::PerThreadDataFriendFields *pt) {
+ MOZ_ASSERT(pt);
+#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
+ commonInit(pt->thingGCRooters);
+#endif
+ }
+
+ public:
+ Rooted(JSContext *cx
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : ptr(js::GCMethods<T>::initial())
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ init(cx);
+ }
+
+ Rooted(JSContext *cx, T initial
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : ptr(initial)
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ init(cx);
+ }
+
+ Rooted(js::PerThreadData *pt
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : ptr(js::GCMethods<T>::initial())
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ init(js::PerThreadDataFriendFields::get(pt));
+ }
+
+ Rooted(js::PerThreadData *pt, T initial
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : ptr(initial)
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ init(js::PerThreadDataFriendFields::get(pt));
+ }
+
+ Rooted(JSRuntime *rt
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : ptr(js::GCMethods<T>::initial())
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ init(js::PerThreadDataFriendFields::getMainThread(rt));
+ }
+
+ Rooted(JSRuntime *rt, T initial
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : ptr(initial)
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ init(js::PerThreadDataFriendFields::getMainThread(rt));
+ }
+
+ ~Rooted() {
+#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
+ JS_ASSERT(*stack == reinterpret_cast<Rooted<void*>*>(this));
+ *stack = prev;
+#endif
+ }
+
+#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
+ Rooted<T> *previous() { return prev; }
+#endif
+
+ /*
+ * Important: Return a reference here so passing a Rooted<T> to
+ * something that takes a |const T&| is not a GC hazard.
+ */
+ operator const T&() const { return ptr; }
+ T operator->() const { return ptr; }
+ T *address() { return &ptr; }
+ const T *address() const { return &ptr; }
+ T &get() { return ptr; }
+ const T &get() const { return ptr; }
+
+ T &operator=(T value) {
+ JS_ASSERT(!js::GCMethods<T>::poisoned(value));
+ ptr = value;
+ return ptr;
+ }
+
+ T &operator=(const Rooted &value) {
+ ptr = value;
+ return ptr;
+ }
+
+ void set(T value) {
+ JS_ASSERT(!js::GCMethods<T>::poisoned(value));
+ ptr = value;
+ }
+
+ bool operator!=(const T &other) const { return ptr != other; }
+ bool operator==(const T &other) const { return ptr == other; }
+
+ private:
+ void commonInit(Rooted<void*> **thingGCRooters) {
+#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
+ js::ThingRootKind kind = js::GCMethods<T>::kind();
+ this->stack = &thingGCRooters[kind];
+ this->prev = *stack;
+ *stack = reinterpret_cast<Rooted<void*>*>(this);
+
+ JS_ASSERT(!js::GCMethods<T>::poisoned(ptr));
+#endif
+ }
+
+#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
+ Rooted<void*> **stack, *prev;
+#endif
+
+#if defined(JS_DEBUG) && defined(JS_GC_ZEAL) && defined(JSGC_ROOT_ANALYSIS) && !defined(JS_THREADSAFE)
+ /* Has the rooting analysis ever scanned this Rooted's stack location? */
+ friend void JS::CheckStackRoots(JSContext*);
+ bool scanned;
+#endif
+
+ /*
+ * |ptr| must be the last field in Rooted because the analysis treats all
+ * Rooted as Rooted<void*> during the analysis. See bug 829372.
+ */
+ T ptr;
+
+ MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+
+ Rooted(const Rooted &) MOZ_DELETE;
+};
+
+#if !(defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING))
+// Defined in vm/String.h.
+template <>
+class Rooted<JSStableString *>;
+#endif
+
+typedef Rooted<JSObject*> RootedObject;
+typedef Rooted<js::Module*> RootedModule;
+typedef Rooted<js::ScriptSourceObject *> RootedScriptSource;
+typedef Rooted<JSFunction*> RootedFunction;
+typedef Rooted<JSScript*> RootedScript;
+typedef Rooted<JSString*> RootedString;
+typedef Rooted<jsid> RootedId;
+typedef Rooted<JS::Value> RootedValue;
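+
+/*
+ * Usage sketch: a Rooted links itself into the context's rooter list on
+ * construction and unlinks on destruction, so it must live on the stack and
+ * be destroyed in LIFO order (CreateGlobal and DoStuff are hypothetical):
+ *
+ *   JS::RootedObject global(cx, CreateGlobal(cx));
+ *   JS::RootedValue v(cx);          // starts at GCMethods<T>::initial()
+ *   DoStuff(cx, global, &v);        // passes Handle and MutableHandle
+ */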
+
+} /* namespace JS */
+
+namespace js {
+
+/*
+ * Mark a stack location as a root for the rooting analysis, without actually
+ * rooting it in release builds. This should only be used for stack locations
+ * of GC things that cannot be relocated by a garbage collection, and that
+ * are definitely reachable via another path.
+ */
+class SkipRoot
+{
+#if defined(JS_DEBUG) && defined(JS_GC_ZEAL) && defined(JSGC_ROOT_ANALYSIS) && !defined(JS_THREADSAFE)
+
+ SkipRoot **stack, *prev;
+ const uint8_t *start;
+ const uint8_t *end;
+
+ template <typename T>
+ void init(SkipRoot **head, const T *ptr, size_t count) {
+ this->stack = head;
+ this->prev = *stack;
+ *stack = this;
+ this->start = (const uint8_t *) ptr;
+ this->end = this->start + (sizeof(T) * count);
+ }
+
+ public:
+ template <typename T>
+ SkipRoot(JSContext *cx, const T *ptr, size_t count = 1
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ {
+ init(&ContextFriendFields::get(cx)->skipGCRooters, ptr, count);
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ }
+
+ template <typename T>
+ SkipRoot(js::PerThreadData *ptd, const T *ptr, size_t count = 1
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ {
+ PerThreadDataFriendFields *ptff = PerThreadDataFriendFields::get(ptd);
+ init(&ptff->skipGCRooters, ptr, count);
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ }
+
+ ~SkipRoot() {
+ JS_ASSERT(*stack == this);
+ *stack = prev;
+ }
+
+ SkipRoot *previous() { return prev; }
+
+ bool contains(const uint8_t *v, size_t len) {
+ return v >= start && v + len <= end;
+ }
+
+#else /* JS_DEBUG && JS_GC_ZEAL && JSGC_ROOT_ANALYSIS && !JS_THREADSAFE */
+
+ public:
+ template <typename T>
+ SkipRoot(JSContext *cx, const T *ptr, size_t count = 1
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ }
+
+ template <typename T>
+ SkipRoot(PerThreadData *ptd, const T *ptr, size_t count = 1
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ }
+
+#endif /* JS_DEBUG && JS_GC_ZEAL && JSGC_ROOT_ANALYSIS && !JS_THREADSAFE */
+
+ MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+};
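+
+/*
+ * Usage sketch (|owner| and maybeInline() are hypothetical): silence the
+ * rooting analysis for a pointer that is kept alive by its rooted owner:
+ *
+ *   JSObject *tmp = owner->maybeInline();   // owner is already rooted
+ *   SkipRoot skip(cx, &tmp);                // analysis skips |tmp|'s slot
+ */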
+
+/* Interface substitute for Rooted<T> which does not root the variable's memory. */
+template <typename T>
+class FakeRooted : public RootedBase<T>
+{
+ public:
+ FakeRooted(JSContext *cx
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : ptr(GCMethods<T>::initial())
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ }
+
+ FakeRooted(JSContext *cx, T initial
+ MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+ : ptr(initial)
+ {
+ MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+ }
+
+ operator T() const { return ptr; }
+ T operator->() const { return ptr; }
+ T *address() { return &ptr; }
+ const T *address() const { return &ptr; }
+ T &get() { return ptr; }
+ const T &get() const { return ptr; }
+
+ T &operator=(T value) {
+ JS_ASSERT(!GCMethods<T>::poisoned(value));
+ ptr = value;
+ return ptr;
+ }
+
+ bool operator!=(const T &other) const { return ptr != other; }
+ bool operator==(const T &other) const { return ptr == other; }
+
+ private:
+ T ptr;
+
+ MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+
+ FakeRooted(const FakeRooted &) MOZ_DELETE;
+};
+
+/* Interface substitute for MutableHandle<T> which is not required to point to rooted memory. */
+template <typename T>
+class FakeMutableHandle : public js::MutableHandleBase<T>
+{
+ public:
+ FakeMutableHandle(T *t) {
+ ptr = t;
+ }
+
+ FakeMutableHandle(FakeRooted<T> *root) {
+ ptr = root->address();
+ }
+
+ void set(T v) {
+ JS_ASSERT(!js::GCMethods<T>::poisoned(v));
+ *ptr = v;
+ }
+
+ T *address() const { return ptr; }
+ T get() const { return *ptr; }
+
+ operator T() const { return get(); }
+ T operator->() const { return get(); }
+
+ private:
+ FakeMutableHandle() {}
+
+ T *ptr;
+
+ template <typename S>
+ void operator=(S v) MOZ_DELETE;
+};
+
+/*
+ * Types for a variable that either should or shouldn't be rooted, depending on
+ * the template parameter Rooted. Used for implementing functions that can
+ * operate on either rooted or unrooted data.
+ *
+ * The toHandle() and toMutableHandle() functions are for calling functions
+ * which require handle types and are only called in the CanGC case. These
+ * allow the calling code to type check.
+ */
+enum AllowGC {
+ NoGC = 0,
+ CanGC = 1
+};
+template <typename T, AllowGC allowGC>
+class MaybeRooted
+{
+};
+
+template <typename T> class MaybeRooted<T, CanGC>
+{
+ public:
+ typedef JS::Handle<T> HandleType;
+ typedef JS::Rooted<T> RootType;
+ typedef JS::MutableHandle<T> MutableHandleType;
+
+ static inline JS::Handle<T> toHandle(HandleType v) {
+ return v;
+ }
+
+ static inline JS::MutableHandle<T> toMutableHandle(MutableHandleType v) {
+ return v;
+ }
+};
+
+template <typename T> class MaybeRooted<T, NoGC>
+{
+ public:
+ typedef T HandleType;
+ typedef FakeRooted<T> RootType;
+ typedef FakeMutableHandle<T> MutableHandleType;
+
+ static inline JS::Handle<T> toHandle(HandleType v) {
+ JS_NOT_REACHED("Bad conversion");
+ return JS::Handle<T>::fromMarkedLocation(NULL);
+ }
+
+ static inline JS::MutableHandle<T> toMutableHandle(MutableHandleType v) {
+ JS_NOT_REACHED("Bad conversion");
+ return JS::MutableHandle<T>::fromMarkedLocation(NULL);
+ }
+};
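+
+/*
+ * Sketch of the intended pattern (LookupImpl is hypothetical):
+ *
+ *   template <AllowGC allowGC>
+ *   static bool
+ *   LookupImpl(JSContext *cx,
+ *              typename MaybeRooted<JSObject*, allowGC>::HandleType obj)
+ *   {
+ *       // In the CanGC instantiation |obj| is a JS::Handle<JSObject*>;
+ *       // in the NoGC instantiation it is a bare JSObject*.
+ *       ...
+ *   }
+ */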
+
+} /* namespace js */
+
+namespace JS {
+
+template <typename T> template <typename S>
+inline
+Handle<T>::Handle(const Rooted<S> &root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy)
+{
+ ptr = reinterpret_cast<const T *>(root.address());
+}
+
+template <typename T> template <typename S>
+inline
+Handle<T>::Handle(MutableHandle<S> &root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy)
+{
+ ptr = reinterpret_cast<const T *>(root.address());
+}
+
+template <typename T>
+inline
+MutableHandle<T>::MutableHandle(Rooted<T> *root)
+{
+ MOZ_STATIC_ASSERT(sizeof(MutableHandle<T>) == sizeof(T *),
+ "MutableHandle must be binary compatible with T*.");
+ ptr = root->address();
+}
+
+} /* namespace JS */
+
+namespace js {
+
+/*
+ * Hook for dynamic root analysis. Checks the native stack and poisons
+ * references to GC things which have not been rooted.
+ */
+inline void MaybeCheckStackRoots(JSContext *cx)
+{
+#if defined(JS_DEBUG) && defined(JS_GC_ZEAL) && defined(JSGC_ROOT_ANALYSIS) && !defined(JS_THREADSAFE)
+ JS::CheckStackRoots(cx);
+#endif
+}
+
+/* Base class for automatic read-only object rooting during compilation. */
+class CompilerRootNode
+{
+ protected:
+ CompilerRootNode(js::gc::Cell *ptr) : next(NULL), ptr_(ptr) {}
+
+ public:
+ void **address() { return (void **)&ptr_; }
+
+ public:
+ CompilerRootNode *next;
+
+ protected:
+ js::gc::Cell *ptr_;
+};
+
+} /* namespace js */
+
+#endif /* js_RootingAPI_h */
diff --git a/js/public/TemplateLib.h b/js/public/TemplateLib.h
new file mode 100644
index 0000000..711158f
--- /dev/null
+++ b/js/public/TemplateLib.h
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_TemplateLib_h
+#define js_TemplateLib_h
+
+#include "jstypes.h"
+
+/*
+ * Library of reusable template meta-functions (that is, functions on types and
+ * compile-time values). Meta-functions are placed inside the 'tl' namespace to
+ * avoid conflict with non-meta functions that logically have the same name
+ * (e.g., js::tl::Min vs. js::Min).
+ */
+
+namespace js {
+namespace tl {
+
+/* Compute min/max/clamp. */
+template <size_t i, size_t j> struct Min {
+ static const size_t result = i < j ? i : j;
+};
+template <size_t i, size_t j> struct Max {
+ static const size_t result = i > j ? i : j;
+};
+template <size_t i, size_t min, size_t max> struct Clamp {
+ static const size_t result = i < min ? min : (i > max ? max : i);
+};
+
+/* Compute x^y. */
+template <size_t x, size_t y> struct Pow {
+ static const size_t result = x * Pow<x, y - 1>::result;
+};
+template <size_t x> struct Pow<x,0> {
+ static const size_t result = 1;
+};
+
+/* Compute floor(log2(i)). */
+template <size_t i> struct FloorLog2 {
+ static const size_t result = 1 + FloorLog2<i / 2>::result;
+};
+template <> struct FloorLog2<0> { /* Error */ };
+template <> struct FloorLog2<1> { static const size_t result = 0; };
+
+/* Compute ceiling(log2(i)). */
+template <size_t i> struct CeilingLog2 {
+ static const size_t result = FloorLog2<2 * i - 1>::result;
+};
+
+/* Round up to the nearest power of 2. */
+template <size_t i> struct RoundUpPow2 {
+ static const size_t result = size_t(1) << CeilingLog2<i>::result;
+};
+template <> struct RoundUpPow2<0> {
+ static const size_t result = 1;
+};
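+
+/*
+ * All of the above evaluate entirely at compile time, e.g.:
+ *
+ *   JS_STATIC_ASSERT(Min<3, 5>::result == 3);
+ *   JS_STATIC_ASSERT(Pow<2, 10>::result == 1024);
+ *   JS_STATIC_ASSERT(FloorLog2<10>::result == 3);
+ *   JS_STATIC_ASSERT(CeilingLog2<10>::result == 4);
+ *   JS_STATIC_ASSERT(RoundUpPow2<10>::result == 16);
+ */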
+
+/* Compute the number of bits in the given unsigned type. */
+template <class T> struct BitSize {
+ static const size_t result = sizeof(T) * JS_BITS_PER_BYTE;
+};
+
+/*
+ * Produce an N-bit mask, where N <= BitSize<size_t>::result. Handle the
+ * language-undefined edge case when N = BitSize<size_t>::result.
+ */
+template <size_t N> struct NBitMask {
+ // Assert the precondition. On success this evaluates to 0. Otherwise it
+ // triggers divide-by-zero at compile time: a guaranteed compile error in
+    // C++11, and usually one in C++98. Add this value to |result| to ensure
+ // its computation.
+ static const size_t checkPrecondition = 0 / size_t(N < BitSize<size_t>::result);
+ static const size_t result = (size_t(1) << N) - 1 + checkPrecondition;
+};
+template <> struct NBitMask<BitSize<size_t>::result> {
+ static const size_t result = size_t(-1);
+};
+
+/*
+ * For the unsigned integral type size_t, compute a mask M for N such that
+ * for all X, !(X & M) implies X * N will not overflow (w.r.t size_t)
+ */
+template <size_t N> struct MulOverflowMask {
+ static const size_t result =
+ ~NBitMask<BitSize<size_t>::result - CeilingLog2<N>::result>::result;
+};
+template <> struct MulOverflowMask<0> { /* Error */ };
+template <> struct MulOverflowMask<1> { static const size_t result = 0; };
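+
+/*
+ * For example, with a 32-bit size_t, MulOverflowMask<8>::result keeps the
+ * top CeilingLog2<8>::result == 3 bits set, so !(X & mask) guarantees that
+ * X * 8 fits in size_t. js_pod_malloc in js/public/Utility.h rejects
+ * oversized element counts with exactly this test.
+ */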
+
+/*
+ * Generate a mask for T such that if (X & sUnsafeRangeSizeMask), an X-sized
+ * array of T's is big enough to cause a ptrdiff_t overflow when subtracting
+ * a pointer to the end of the array from the beginning.
+ */
+template <class T> struct UnsafeRangeSizeMask {
+ /*
+ * The '2' factor means the top bit is clear, sizeof(T) converts from
+ * units of elements to bytes.
+ */
+ static const size_t result = MulOverflowMask<2 * sizeof(T)>::result;
+};
+
+template <bool cond, typename T, T v1, T v2> struct If { static const T result = v1; };
+template <typename T, T v1, T v2> struct If<false, T, v1, v2> { static const T result = v2; };
+
+/*
+ * Traits class for identifying types that are implicitly barriered.
+ */
+template <class T> struct IsRelocatableHeapType { static const bool result = true; };
+
+} /* namespace tl */
+} /* namespace js */
+
+#endif /* js_TemplateLib_h */
diff --git a/js/public/Utility.h b/js/public/Utility.h
new file mode 100644
index 0000000..ae50a2a
--- /dev/null
+++ b/js/public/Utility.h
@@ -0,0 +1,871 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Utility_h
+#define js_Utility_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Compiler.h"
+#include "mozilla/Scoped.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef JS_OOM_DO_BACKTRACES
+#include <stdio.h>
+#include <execinfo.h>
+#endif
+
+#include "jstypes.h"
+
+#include "js/TemplateLib.h"
+
+/* The public JS engine namespace. */
+namespace JS {}
+
+/* The mozilla-shared reusable template/utility namespace. */
+namespace mozilla {}
+
+/* The private JS engine namespace. */
+namespace js {}
+
+/*
+ * Pattern used to overwrite freed memory. If you are accessing an object with
+ * this pattern, you probably have a dangling pointer.
+ */
+#define JS_FREE_PATTERN 0xDA
+
+#define JS_ASSERT(expr) MOZ_ASSERT(expr)
+#define JS_ASSERT_IF(cond, expr) MOZ_ASSERT_IF(cond, expr)
+#define JS_NOT_REACHED(reason) MOZ_NOT_REACHED(reason)
+#define JS_ALWAYS_TRUE(expr) MOZ_ALWAYS_TRUE(expr)
+#define JS_ALWAYS_FALSE(expr) MOZ_ALWAYS_FALSE(expr)
+
+#ifdef JS_DEBUG
+# ifdef JS_THREADSAFE
+# define JS_THREADSAFE_ASSERT(expr) JS_ASSERT(expr)
+# else
+# define JS_THREADSAFE_ASSERT(expr) ((void) 0)
+# endif
+#else
+# define JS_THREADSAFE_ASSERT(expr) ((void) 0)
+#endif
+
+#if defined(JS_DEBUG)
+# define JS_DIAGNOSTICS_ASSERT(expr) MOZ_ASSERT(expr)
+#elif defined(JS_CRASH_DIAGNOSTICS)
+# define JS_DIAGNOSTICS_ASSERT(expr) do { if (!(expr)) MOZ_CRASH(); } while(0)
+#else
+# define JS_DIAGNOSTICS_ASSERT(expr) ((void) 0)
+#endif
+
+#define JS_STATIC_ASSERT(cond) MOZ_STATIC_ASSERT(cond, "JS_STATIC_ASSERT")
+#define JS_STATIC_ASSERT_IF(cond, expr) MOZ_STATIC_ASSERT_IF(cond, expr, "JS_STATIC_ASSERT_IF")
+
+extern MOZ_NORETURN JS_PUBLIC_API(void)
+JS_Assert(const char *s, const char *file, int ln);
+
+/*
+ * Abort the process in a non-graceful manner. This will cause a core file,
+ * call to the debugger or other moral equivalent as well as causing the
+ * entire process to stop.
+ */
+extern JS_PUBLIC_API(void) JS_Abort(void);
+
+/*
+ * Custom allocator support for SpiderMonkey
+ */
+#if defined JS_USE_CUSTOM_ALLOCATOR
+# include "jscustomallocator.h"
+#else
+# ifdef JS_DEBUG
+/*
+ * In order to test OOM conditions, when the testing function
+ * oomAfterAllocations(COUNT) is called, we fail continuously after the
+ * COUNT'th allocation from that point.
+ */
+extern JS_PUBLIC_DATA(uint32_t) OOM_maxAllocations; /* set in builtins/TestingFunctions.cpp */
+extern JS_PUBLIC_DATA(uint32_t) OOM_counter; /* data race, who cares. */
+
+#ifdef JS_OOM_DO_BACKTRACES
+#define JS_OOM_BACKTRACE_SIZE 32
+static JS_ALWAYS_INLINE void
+PrintBacktrace()
+{
+ void* OOM_trace[JS_OOM_BACKTRACE_SIZE];
+ char** OOM_traceSymbols = NULL;
+ int32_t OOM_traceSize = 0;
+ int32_t OOM_traceIdx = 0;
+ OOM_traceSize = backtrace(OOM_trace, JS_OOM_BACKTRACE_SIZE);
+ OOM_traceSymbols = backtrace_symbols(OOM_trace, OOM_traceSize);
+
+ if (!OOM_traceSymbols)
+ return;
+
+ for (OOM_traceIdx = 0; OOM_traceIdx < OOM_traceSize; ++OOM_traceIdx) {
+ fprintf(stderr, "#%d %s\n", OOM_traceIdx, OOM_traceSymbols[OOM_traceIdx]);
+ }
+
+ // This must be free(), not js_free(), because backtrace_symbols()
+ // allocates with malloc().
+ free(OOM_traceSymbols);
+}
+
+#define JS_OOM_EMIT_BACKTRACE() \
+ do {\
+ fprintf(stderr, "Forcing artificial memory allocation function failure:\n");\
+ PrintBacktrace();\
+ } while (0)
+# else
+# define JS_OOM_EMIT_BACKTRACE() do {} while(0)
+#endif /* JS_OOM_DO_BACKTRACES */
+
+# define JS_OOM_POSSIBLY_FAIL() \
+ do \
+ { \
+ if (++OOM_counter > OOM_maxAllocations) { \
+ JS_OOM_EMIT_BACKTRACE();\
+ return NULL; \
+ } \
+ } while (0)
+
+# define JS_OOM_POSSIBLY_FAIL_REPORT(cx) \
+ do \
+ { \
+ if (++OOM_counter > OOM_maxAllocations) { \
+ JS_OOM_EMIT_BACKTRACE();\
+ js_ReportOutOfMemory(cx);\
+ return NULL; \
+ } \
+ } while (0)
+
+# else
+# define JS_OOM_POSSIBLY_FAIL() do {} while(0)
+# define JS_OOM_POSSIBLY_FAIL_REPORT(cx) do {} while(0)
+# endif /* JS_DEBUG */
+
+static JS_INLINE void* js_malloc(size_t bytes)
+{
+ JS_OOM_POSSIBLY_FAIL();
+ return malloc(bytes);
+}
+
+static JS_INLINE void* js_calloc(size_t bytes)
+{
+ JS_OOM_POSSIBLY_FAIL();
+ return calloc(bytes, 1);
+}
+
+static JS_INLINE void* js_calloc(size_t nmemb, size_t size)
+{
+ JS_OOM_POSSIBLY_FAIL();
+ return calloc(nmemb, size);
+}
+
+static JS_INLINE void* js_realloc(void* p, size_t bytes)
+{
+ JS_OOM_POSSIBLY_FAIL();
+ return realloc(p, bytes);
+}
+
+static JS_INLINE void js_free(void* p)
+{
+ free(p);
+}
+#endif /* JS_USE_CUSTOM_ALLOCATOR */
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Replace bit-scanning code sequences with CPU-specific instructions to
+ * speedup calculations of ceiling/floor log2.
+ *
+ * With GCC 3.4 or later we can use __builtin_clz for that, see bug 327129.
+ *
+ * SWS: Added MSVC intrinsic bitscan support. See bugs 349364 and 356856.
+ */
+#if defined(_WIN32) && (_MSC_VER >= 1300) && (defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64))
+
+unsigned char _BitScanForward(unsigned long * Index, unsigned long Mask);
+unsigned char _BitScanReverse(unsigned long * Index, unsigned long Mask);
+# pragma intrinsic(_BitScanForward,_BitScanReverse)
+
+__forceinline static int
+__BitScanForward32(unsigned int val)
+{
+ unsigned long idx;
+
+ _BitScanForward(&idx, (unsigned long)val);
+ return (int)idx;
+}
+__forceinline static int
+__BitScanReverse32(unsigned int val)
+{
+ unsigned long idx;
+
+ _BitScanReverse(&idx, (unsigned long)val);
+ return (int)(31-idx);
+}
+# define js_bitscan_ctz32(val) __BitScanForward32(val)
+# define js_bitscan_clz32(val) __BitScanReverse32(val)
+
+#if defined(_M_AMD64) || defined(_M_X64)
+unsigned char _BitScanForward64(unsigned long * Index, unsigned __int64 Mask);
+unsigned char _BitScanReverse64(unsigned long * Index, unsigned __int64 Mask);
+# pragma intrinsic(_BitScanForward64,_BitScanReverse64)
+#endif
+
+__forceinline static int
+__BitScanForward64(unsigned __int64 val)
+{
+#if defined(_M_AMD64) || defined(_M_X64)
+ unsigned long idx;
+
+ _BitScanForward64(&idx, val);
+ return (int)idx;
+#else
+ uint32_t lo = (uint32_t)val;
+ uint32_t hi = (uint32_t)(val >> 32);
+ return lo != 0 ?
+ js_bitscan_ctz32(lo) :
+ 32 + js_bitscan_ctz32(hi);
+#endif
+}
+__forceinline static int
+__BitScanReverse64(unsigned __int64 val)
+{
+#if defined(_M_AMD64) || defined(_M_X64)
+ unsigned long idx;
+
+ _BitScanReverse64(&idx, val);
+ return (int)(63-idx);
+#else
+ uint32_t lo = (uint32_t)val;
+ uint32_t hi = (uint32_t)(val >> 32);
+ return hi != 0 ?
+ js_bitscan_clz32(hi) :
+ 32 + js_bitscan_clz32(lo);
+#endif
+}
+# define js_bitscan_ctz64(val) __BitScanForward64(val)
+# define js_bitscan_clz64(val) __BitScanReverse64(val)
+#elif MOZ_IS_GCC
+
+#if MOZ_GCC_VERSION_AT_LEAST(3, 4, 0)
+# define USE_BUILTIN_CTZ
+#endif
+
+#elif defined(__clang__)
+
+#if __has_builtin(__builtin_ctz)
+# define USE_BUILTIN_CTZ
+#endif
+
+#endif
+
+#if defined(USE_BUILTIN_CTZ)
+
+JS_STATIC_ASSERT(sizeof(unsigned int) == sizeof(uint32_t));
+# define js_bitscan_ctz32(val) __builtin_ctz(val)
+# define js_bitscan_clz32(val) __builtin_clz(val)
+
+JS_STATIC_ASSERT(sizeof(unsigned long long) == sizeof(uint64_t));
+# define js_bitscan_ctz64(val) __builtin_ctzll(val)
+# define js_bitscan_clz64(val) __builtin_clzll(val)
+
+# undef USE_BUILTIN_CTZ
+
+#endif
+
+/*
+ * Macro version of JS_CeilingLog2: compute the log of the least power of 2
+ * greater than or equal to _n, returning the result in _log2. Uses an
+ * intrinsic or count-leading-zeros to do the work. The macro checks for
+ * "n <= 1" and not "n != 0" because js_bitscan_clz32(0) is undefined.
+ */
+# define JS_CEILING_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ unsigned int j_ = (unsigned int)(_n); \
+ (_log2) = (j_ <= 1 ? 0 : 32 - js_bitscan_clz32(j_ - 1)); \
+ JS_END_MACRO
+
+/*
+ * Macro version of JS_FloorLog2: compute the log of the greatest power of 2
+ * less than or equal to _n, returning the result in _log2. This is equivalent
+ * to finding the highest set bit in the word. Since js_bitscan_clz32(0) is
+ * undefined, the macro sets the lowest bit to 1 to ensure a 0 result when
+ * _n == 0.
+ */
+# define JS_FLOOR_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ (_log2) = 31 - js_bitscan_clz32(((unsigned int)(_n)) | 1); \
+ JS_END_MACRO
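+
+/*
+ * For example (sketch):
+ *
+ *   unsigned log2;
+ *   JS_CEILING_LOG2(log2, 10);   // log2 == 4: 16 is the least power of 2 >= 10
+ *   JS_FLOOR_LOG2(log2, 10);     // log2 == 3: 8 is the greatest power of 2 <= 10
+ */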
+
+#if JS_BYTES_PER_WORD == 4
+# define js_FloorLog2wImpl(n) \
+ ((size_t)(JS_BITS_PER_WORD - 1 - js_bitscan_clz32(n)))
+#elif JS_BYTES_PER_WORD == 8
+# define js_FloorLog2wImpl(n) \
+ ((size_t)(JS_BITS_PER_WORD - 1 - js_bitscan_clz64(n)))
+#else
+# error "NOT SUPPORTED"
+#endif
+
+JS_END_EXTERN_C
+
+/*
+ * Internal macro.
+ * Compute the log of the least power of 2 greater than or equal to n. This is
+ * a version of JS_CeilingLog2 that operates on unsigned integers with
+ * CPU-dependent size.
+ */
+#define JS_CEILING_LOG2W(n) ((n) <= 1 ? 0 : 1 + JS_FLOOR_LOG2W((n) - 1))
+
+/*
+ * Internal function.
+ * Compute the log of the greatest power of 2 less than or equal to n.
+ * This is a version of JS_FloorLog2 that operates on unsigned integers with
+ * CPU-dependent size and requires that n != 0.
+ */
+static MOZ_ALWAYS_INLINE size_t
+JS_FLOOR_LOG2W(size_t n)
+{
+ JS_ASSERT(n != 0);
+ return js_FloorLog2wImpl(n);
+}
+
+/*
+ * JS_ROTATE_LEFT32
+ *
+ * There is no rotate operation in the C Language so the construct (a << 4) |
+ * (a >> 28) is used instead. Most compilers convert this to a rotate
+ * instruction but some versions of MSVC don't without a little help. To get
+ * MSVC to generate a rotate instruction, we have to use the _rotl intrinsic
+ * and use a pragma to make _rotl inline.
+ *
+ * MSVC in VS2005 will do an inline rotate instruction on the above construct.
+ */
+#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_AMD64) || \
+ defined(_M_X64))
+#include <stdlib.h>
+#pragma intrinsic(_rotl)
+#define JS_ROTATE_LEFT32(a, bits) _rotl(a, bits)
+#else
+#define JS_ROTATE_LEFT32(a, bits) (((a) << (bits)) | ((a) >> (32 - (bits))))
+#endif
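+
+/*
+ * For example, JS_ROTATE_LEFT32(0x80000001, 4) == 0x00000018: the high bit
+ * wraps around into the low bits.
+ */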
+
+#include <new>
+
+/*
+ * Low-level memory management in SpiderMonkey:
+ *
+ * ** Do not use the standard malloc/free/realloc: SpiderMonkey allows these
+ * to be redefined (via JS_USE_CUSTOM_ALLOCATOR) and Gecko even #define's
+ * these symbols.
+ *
+ * ** Do not use the builtin C++ operator new and delete: these throw on
+ * error and we cannot override them not to.
+ *
+ * Allocation:
+ *
+ * - If the lifetime of the allocation is tied to the lifetime of a GC-thing
+ * (that is, finalizing the GC-thing will free the allocation), call one of
+ * the following functions:
+ *
+ * JSContext::{malloc_,realloc_,calloc_,new_}
+ * JSRuntime::{malloc_,realloc_,calloc_,new_}
+ *
+ * These functions accumulate the number of bytes allocated which is used as
+ * part of the GC-triggering heuristic.
+ *
+ * The difference between the JSContext and JSRuntime versions is that the
+ * cx version reports an out-of-memory error on OOM. (This follows from the
+ * general SpiderMonkey idiom that a JSContext-taking function reports its
+ * own errors.)
+ *
+ * - Otherwise, use js_malloc/js_realloc/js_calloc/js_free/js_new
+ *
+ * Deallocation:
+ *
+ * - Ordinarily, use js_free/js_delete.
+ *
+ * - For deallocations during GC finalization, use one of the following
+ * operations on the FreeOp provided to the finalizer:
+ *
+ * FreeOp::{free_,delete_}
+ *
+ * The advantage of these operations is that the memory is batched and freed
+ * on another thread.
+ */
+
+#define JS_NEW_BODY(allocator, t, parms) \
+ void *memory = allocator(sizeof(t)); \
+ return memory ? new(memory) t parms : NULL;
+
+/*
+ * Given a class which should provide 'new' methods, add
+ * JS_DECLARE_NEW_METHODS (see JSContext for a usage example). This
+ * adds 'new' methods with up to 12 parameters. Add more versions of new
+ * below if you need more than 12 parameters.
+ *
+ * Note: Do not add a ; at the end of a use of JS_DECLARE_NEW_METHODS,
+ * or the build will break.
+ */
+#define JS_DECLARE_NEW_METHODS(NEWNAME, ALLOCATOR, QUALIFIERS)\
+ template <class T>\
+ QUALIFIERS T *NEWNAME() {\
+ JS_NEW_BODY(ALLOCATOR, T, ())\
+ }\
+\
+ template <class T, class P1>\
+ QUALIFIERS T *NEWNAME(P1 p1) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1))\
+ }\
+\
+ template <class T, class P1, class P2>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2))\
+ }\
+\
+ template <class T, class P1, class P2, class P3>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4, class P5>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4, p5))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4, class P5, class P6>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4, p5, p6))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4, class P5, class P6, class P7>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4, p5, p6, p7))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4, class P5, class P6, class P7, class P8>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4, p5, p6, p7, p8))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4, class P5, class P6, class P7, class P8, class P9>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4, p5, p6, p7, p8, p9))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4, class P5, class P6, class P7, class P8, class P9, class P10>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9, P10 p10) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4, class P5, class P6, class P7, class P8, class P9, class P10, class P11>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9, P10 p10, P11 p11) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11))\
+ }\
+\
+ template <class T, class P1, class P2, class P3, class P4, class P5, class P6, class P7, class P8, class P9, class P10, class P11, class P12>\
+ QUALIFIERS T *NEWNAME(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8, P9 p9, P10 p10, P11 p11, P12 p12) {\
+ JS_NEW_BODY(ALLOCATOR, T, (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12))\
+ }\
+
+JS_DECLARE_NEW_METHODS(js_new, js_malloc, static JS_ALWAYS_INLINE)
+
+template <class T>
+static JS_ALWAYS_INLINE void
+js_delete(T *p)
+{
+ if (p) {
+ p->~T();
+ js_free(p);
+ }
+}
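+
+/*
+ * Usage sketch for the fallible allocators above (Point is hypothetical):
+ *
+ *   struct Point { Point(int x, int y) : x(x), y(y) {} int x, y; };
+ *
+ *   Point *p = js_new<Point>(1, 2);   // returns NULL on OOM, never throws
+ *   if (!p)
+ *       return false;
+ *   ...
+ *   js_delete(p);                     // runs ~Point(), then js_free()
+ */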
+
+template<class T>
+static JS_ALWAYS_INLINE void
+js_delete_poison(T *p)
+{
+ if (p) {
+ p->~T();
+ memset(p, 0x3B, sizeof(T));
+ js_free(p);
+ }
+}
+
+template <class T>
+static JS_ALWAYS_INLINE T *
+js_pod_malloc()
+{
+ return (T *)js_malloc(sizeof(T));
+}
+
+template <class T>
+static JS_ALWAYS_INLINE T *
+js_pod_calloc()
+{
+ return (T *)js_calloc(sizeof(T));
+}
+
+template <class T>
+static JS_ALWAYS_INLINE T *
+js_pod_malloc(size_t numElems)
+{
+ if (numElems & js::tl::MulOverflowMask<sizeof(T)>::result)
+ return NULL;
+ return (T *)js_malloc(numElems * sizeof(T));
+}
+
+template <class T>
+static JS_ALWAYS_INLINE T *
+js_pod_calloc(size_t numElems)
+{
+ if (numElems & js::tl::MulOverflowMask<sizeof(T)>::result)
+ return NULL;
+ return (T *)js_calloc(numElems * sizeof(T));
+}
+
+namespace js {
+
+template<typename T>
+struct ScopedFreePtrTraits
+{
+ typedef T* type;
+ static T* empty() { return NULL; }
+ static void release(T* ptr) { js_free(ptr); }
+};
+SCOPED_TEMPLATE(ScopedJSFreePtr, ScopedFreePtrTraits)
+
+template <typename T>
+struct ScopedDeletePtrTraits : public ScopedFreePtrTraits<T>
+{
+ static void release(T *ptr) { js_delete(ptr); }
+};
+SCOPED_TEMPLATE(ScopedJSDeletePtr, ScopedDeletePtrTraits)
+
+template <typename T>
+struct ScopedReleasePtrTraits : public ScopedFreePtrTraits<T>
+{
+ static void release(T *ptr) { if (ptr) ptr->release(); }
+};
+SCOPED_TEMPLATE(ScopedReleasePtr, ScopedReleasePtrTraits)
+
+} /* namespace js */
+
+namespace js {
+
+/*
+ * "Move" References
+ *
+ * Some types can be copied much more efficiently if we know the original's
+ * value need not be preserved --- that is, if we are doing a "move", not a
+ * "copy". For example, if we have:
+ *
+ * Vector<T> u;
+ * Vector<T> v(u);
+ *
+ * the constructor for v must apply a copy constructor to each element of u ---
+ * taking time linear in the length of u. However, if we know we will not need u
+ * any more once v has been initialized, then we could initialize v very
+ * efficiently simply by stealing u's dynamically allocated buffer and giving it
+ * to v --- a constant-time operation, regardless of the size of u.
+ *
+ * Moves often appear in container implementations. For example, when we append
+ * to a vector, we may need to resize its buffer. This entails moving each of
+ * its extant elements from the old, smaller buffer to the new, larger buffer.
+ * But once the elements have been migrated, we're just going to throw away the
+ * old buffer; we don't care if they still have their values. So if the vector's
+ * element type can implement "move" more efficiently than "copy", the vector
+ * resizing should by all means use a "move" operation. Hash tables also need to
+ * be resized.
+ *
+ * The details of the optimization, and whether it's worth applying, vary from
+ * one type to the next. And while some constructor calls are moves, many really
+ * are copies, and can't be optimized this way. So we need:
+ *
+ * 1) a way for a particular invocation of a copy constructor to say that it's
+ * really a move, and that the value of the original isn't important
+ *    afterwards (although it must still be safe to destroy); and
+ *
+ * 2) a way for a type (like Vector) to announce that it can be moved more
+ * efficiently than it can be copied, and provide an implementation of that
+ * move operation.
+ *
+ * The Move(T &) function takes a reference to a T, and returns a MoveRef<T>
+ * referring to the same value; that's 1). A MoveRef<T> is simply a reference
+ * to a T, annotated to say that a copy constructor applied to it may move that
+ * T, instead of copying it. Finally, a constructor that accepts a MoveRef<T>
+ * should perform a more efficient move, instead of a copy, providing 2).
+ *
+ * So, where we might define a copy constructor for a class C like this:
+ *
+ * C(const C &rhs) { ... copy rhs to this ... }
+ *
+ * we would declare a move constructor like this:
+ *
+ * C(MoveRef<C> rhs) { ... move rhs to this ... }
+ *
+ * And where we might perform a copy like this:
+ *
+ * C c2(c1);
+ *
+ * we would perform a move like this:
+ *
+ * C c2(Move(c1))
+ *
+ * Note that MoveRef<T> implicitly converts to T &, so you can pass a
+ * MoveRef<T> to an ordinary copy constructor for a type that doesn't support a
+ * special move constructor, and you'll just get a copy. This means that
+ * templates can use Move whenever they know they won't use the original value
+ * any more, even if they're not sure whether the type at hand has a specialized
+ * move constructor. If it doesn't, the MoveRef<T> will just convert to a T &,
+ * and the ordinary copy constructor will apply.
+ *
+ * A class with a move constructor can also provide a move assignment operator,
+ * which runs this's destructor, and then applies the move constructor to
+ * *this's memory. A typical definition:
+ *
+ * C &operator=(MoveRef<C> rhs) {
+ * this->~C();
+ * new(this) C(rhs);
+ * return *this;
+ * }
+ *
+ * With that in place, one can write move assignments like this:
+ *
+ * c2 = Move(c1);
+ *
+ * This destroys c2's old value, moves c1's value to c2, and leaves c1 in an
+ * undefined but destructible state.
+ *
+ * This header file defines MoveRef and Move in the js namespace. It's up to
+ * individual containers to annotate moves as such, by calling Move; and it's up
+ * to individual types to define move constructors.
+ *
+ * One hint: if you're writing a move constructor where the type has members
+ * that should be moved themselves, it's much nicer to write this:
+ *
+ * C(MoveRef<C> c) : x(c->x), y(c->y) { }
+ *
+ * than the equivalent:
+ *
+ * C(MoveRef<C> c) { new(&x) X(c->x); new(&y) Y(c->y); }
+ *
+ * especially since GNU C++ fails to notice that this does indeed initialize x
+ * and y, which may matter if they're const.
+ */
+template<typename T>
+class MoveRef {
+ public:
+ typedef T Referent;
+ explicit MoveRef(T &t) : pointer(&t) { }
+ T &operator*() const { return *pointer; }
+ T *operator->() const { return pointer; }
+ operator T& () const { return *pointer; }
+ private:
+ T *pointer;
+};
+
+template<typename T>
+MoveRef<T> Move(T &t) { return MoveRef<T>(t); }
+
+template<typename T>
+MoveRef<T> Move(const T &t) { return MoveRef<T>(const_cast<T &>(t)); }
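+
+/*
+ * Putting the pieces together (sketch; Buf is hypothetical):
+ *
+ *   class Buf {
+ *       char *data;
+ *     public:
+ *       Buf(MoveRef<Buf> rhs) : data(rhs->data) { rhs->data = NULL; }
+ *       Buf &operator=(MoveRef<Buf> rhs) {
+ *           this->~Buf();
+ *           new(this) Buf(rhs);
+ *           return *this;
+ *       }
+ *   };
+ *
+ *   Buf b2(Move(b1));   // steals b1's buffer; no copy of the contents
+ */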
+
+/* Useful for implementing containers that assert non-reentrancy */
+class ReentrancyGuard
+{
+ /* ReentrancyGuard is not copyable. */
+ ReentrancyGuard(const ReentrancyGuard &);
+ void operator=(const ReentrancyGuard &);
+
+#ifdef JS_DEBUG
+ bool &entered;
+#endif
+ public:
+ template <class T>
+#ifdef JS_DEBUG
+ ReentrancyGuard(T &obj)
+ : entered(obj.entered)
+#else
+ ReentrancyGuard(T &/*obj*/)
+#endif
+ {
+#ifdef JS_DEBUG
+ JS_ASSERT(!entered);
+ entered = true;
+#endif
+ }
+ ~ReentrancyGuard()
+ {
+#ifdef JS_DEBUG
+ entered = false;
+#endif
+ }
+};
+
+template <class T>
+JS_ALWAYS_INLINE static void
+Swap(T &t, T &u)
+{
+ T tmp(Move(t));
+ t = Move(u);
+ u = Move(tmp);
+}
+
+/*
+ * Round x up to the nearest power of 2. This function assumes that the most
+ * significant bit of x is not set, which would lead to overflow.
+ */
+JS_ALWAYS_INLINE size_t
+RoundUpPow2(size_t x)
+{
+ return size_t(1) << JS_CEILING_LOG2W(x);
+}
+
+/* Integral types for all hash functions. */
+typedef uint32_t HashNumber;
+const unsigned HashNumberSizeBits = 32;
+
+namespace detail {
+
+/*
+ * Given a raw hash code, h, return a number that can be used to select a hash
+ * bucket.
+ *
+ * This function aims to produce as uniform an output distribution as possible,
+ * especially in the most significant (leftmost) bits, even though the input
+ * distribution may be highly nonrandom, given the constraints that this must
+ * be deterministic and quick to compute.
+ *
+ * Since the leftmost bits of the result are best, the hash bucket index is
+ * computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent
+ * right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask.
+ *
+ * FIXME: OrderedHashTable uses a bit-mask; see bug 775896.
+ */
+inline HashNumber
+ScrambleHashCode(HashNumber h)
+{
+ /*
+ * Simply returning h would not cause any hash tables to produce wrong
+ * answers. But it can produce pathologically bad performance: The caller
+ * right-shifts the result, keeping only the highest bits. The high bits of
+ * hash codes are very often completely entropy-free. (So are the lowest
+ * bits.)
+ *
+ * So we use Fibonacci hashing, as described in Knuth, The Art of Computer
+ * Programming, 6.4. This mixes all the bits of the input hash code h.
+ *
+ * The value of goldenRatio is taken from the hex
+ * expansion of the golden ratio, which starts 1.9E3779B9....
+ * This value is especially good if values with consecutive hash codes
+ * are stored in a hash table; see Knuth for details.
+ */
+ static const HashNumber goldenRatio = 0x9E3779B9U;
+ return h * goldenRatio;
+}
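+
+/*
+ * Bucket selection sketch: for a table with 2^log2Size buckets (log2Size > 0;
+ * HashKey is hypothetical), keep the high bits of the scrambled code:
+ *
+ *   HashNumber h = HashKey(key);
+ *   uint32_t bucket = ScrambleHashCode(h) >> (js::HashNumberSizeBits - log2Size);
+ */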
+
+} /* namespace detail */
+
+} /* namespace js */
+
+namespace JS {
+
+/*
+ * Methods for poisoning GC heap pointer words and checking for poisoned words.
+ * These are in this file for use in Value methods and so forth.
+ *
+ * If the moving GC hazard analysis is in use and detects a non-rooted stack
+ * pointer to a GC thing, one byte of that pointer is poisoned to refer to an
+ * invalid location. For both 32 bit and 64 bit systems, the fourth byte of the
+ * pointer is overwritten, to reduce the likelihood of accidentally changing
+ * a live integer value.
+ */
+
+inline void PoisonPtr(void *v)
+{
+#if defined(JSGC_ROOT_ANALYSIS) && defined(JS_DEBUG)
+ uint8_t *ptr = (uint8_t *) v + 3;
+ *ptr = JS_FREE_PATTERN;
+#endif
+}
+
+template <typename T>
+inline bool IsPoisonedPtr(T *v)
+{
+#if defined(JSGC_ROOT_ANALYSIS) && defined(JS_DEBUG)
+ uint32_t mask = uintptr_t(v) & 0xff000000;
+ return mask == uint32_t(JS_FREE_PATTERN << 24);
+#else
+ return false;
+#endif
+}
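+
+/*
+ * For example, on a little-endian 32-bit target, poisoning the pointer
+ * 0x12345678 overwrites its most significant byte with JS_FREE_PATTERN,
+ * yielding 0xDA345678; IsPoisonedPtr then recognizes it by masking off
+ * everything but that byte.
+ */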
+
+}
+
+/*
+ * This is SpiderMonkey's equivalent to |nsMallocSizeOfFun|.
+ */
+typedef size_t(*JSMallocSizeOfFun)(const void *p);
+
+/* sixgill annotation defines */
+#ifndef HAVE_STATIC_ANNOTATIONS
+# define HAVE_STATIC_ANNOTATIONS
+# ifdef XGILL_PLUGIN
+# define STATIC_PRECONDITION(COND) __attribute__((precondition(#COND)))
+# define STATIC_PRECONDITION_ASSUME(COND) __attribute__((precondition_assume(#COND)))
+# define STATIC_POSTCONDITION(COND) __attribute__((postcondition(#COND)))
+# define STATIC_POSTCONDITION_ASSUME(COND) __attribute__((postcondition_assume(#COND)))
+# define STATIC_INVARIANT(COND) __attribute__((invariant(#COND)))
+# define STATIC_INVARIANT_ASSUME(COND) __attribute__((invariant_assume(#COND)))
+# define STATIC_PASTE2(X,Y) X ## Y
+# define STATIC_PASTE1(X,Y) STATIC_PASTE2(X,Y)
+# define STATIC_ASSERT(COND) \
+ JS_BEGIN_MACRO \
+ __attribute__((assert_static(#COND), unused)) \
+ int STATIC_PASTE1(assert_static_, __COUNTER__); \
+ JS_END_MACRO
+# define STATIC_ASSUME(COND) \
+ JS_BEGIN_MACRO \
+ __attribute__((assume_static(#COND), unused)) \
+ int STATIC_PASTE1(assume_static_, __COUNTER__); \
+ JS_END_MACRO
+# define STATIC_ASSERT_RUNTIME(COND) \
+ JS_BEGIN_MACRO \
+ __attribute__((assert_static_runtime(#COND), unused)) \
+ int STATIC_PASTE1(assert_static_runtime_, __COUNTER__); \
+ JS_END_MACRO
+# else /* XGILL_PLUGIN */
+# define STATIC_PRECONDITION(COND) /* nothing */
+# define STATIC_PRECONDITION_ASSUME(COND) /* nothing */
+# define STATIC_POSTCONDITION(COND) /* nothing */
+# define STATIC_POSTCONDITION_ASSUME(COND) /* nothing */
+# define STATIC_INVARIANT(COND) /* nothing */
+# define STATIC_INVARIANT_ASSUME(COND) /* nothing */
+# define STATIC_ASSERT(COND) JS_BEGIN_MACRO /* nothing */ JS_END_MACRO
+# define STATIC_ASSUME(COND) JS_BEGIN_MACRO /* nothing */ JS_END_MACRO
+# define STATIC_ASSERT_RUNTIME(COND) JS_BEGIN_MACRO /* nothing */ JS_END_MACRO
+# endif /* XGILL_PLUGIN */
+# define STATIC_SKIP_INFERENCE STATIC_INVARIANT(skip_inference())
+#endif /* HAVE_STATIC_ANNOTATIONS */
+
+#endif /* js_Utility_h */
diff --git a/js/public/Value.h b/js/public/Value.h
new file mode 100644
index 0000000..72b3f38
--- /dev/null
+++ b/js/public/Value.h
@@ -0,0 +1,1844 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS::Value implementation. */
+
+#ifndef js_Value_h
+#define js_Value_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Likely.h"
+
+#include <limits> /* for std::numeric_limits */
+
+#include "js/Anchor.h"
+#include "js/RootingAPI.h"
+#include "js/Utility.h"
+
+namespace JS { class Value; }
+
+/* JS::Value can store a full int32_t. */
+#define JSVAL_INT_BITS 32
+#define JSVAL_INT_MIN ((int32_t)0x80000000)
+#define JSVAL_INT_MAX ((int32_t)0x7fffffff)
+
+/*
+ * Try to get jsvals 64-bit aligned. We could almost assert that all values are
+ * aligned, but MSVC and GCC occasionally break alignment.
+ */
+#if defined(__GNUC__) || defined(__xlc__) || defined(__xlC__)
+# define JSVAL_ALIGNMENT __attribute__((aligned (8)))
+#elif defined(_MSC_VER)
+ /*
+ * Structs can be aligned with MSVC, but not if they are used as parameters,
+ * so we just don't try to align.
+ */
+# define JSVAL_ALIGNMENT
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+# define JSVAL_ALIGNMENT
+#elif defined(__HP_cc) || defined(__HP_aCC)
+# define JSVAL_ALIGNMENT
+#endif
+
+#if JS_BITS_PER_WORD == 64
+# define JSVAL_TAG_SHIFT 47
+#endif
+
+/*
+ * We try to use enums so that printing a jsval_layout in the debugger shows
+ * nice symbolic type tags; however, we can only do this when we can force the
+ * underlying type of the enum to be the desired size.
+ */
+#if defined(__cplusplus) && !defined(__SUNPRO_CC) && !defined(__xlC__)
+
+#if defined(_MSC_VER)
+# define JS_ENUM_HEADER(id, type) enum id : type
+# define JS_ENUM_FOOTER(id)
+#else
+# define JS_ENUM_HEADER(id, type) enum id
+# define JS_ENUM_FOOTER(id) __attribute__((packed))
+#endif
+
+/* Remember to propagate changes to the C defines below. */
+JS_ENUM_HEADER(JSValueType, uint8_t)
+{
+ JSVAL_TYPE_DOUBLE = 0x00,
+ JSVAL_TYPE_INT32 = 0x01,
+ JSVAL_TYPE_UNDEFINED = 0x02,
+ JSVAL_TYPE_BOOLEAN = 0x03,
+ JSVAL_TYPE_MAGIC = 0x04,
+ JSVAL_TYPE_STRING = 0x05,
+ JSVAL_TYPE_NULL = 0x06,
+ JSVAL_TYPE_OBJECT = 0x07,
+
+ /* These never appear in a jsval; they are only provided as an out-of-band value. */
+ JSVAL_TYPE_UNKNOWN = 0x20,
+ JSVAL_TYPE_MISSING = 0x21
+} JS_ENUM_FOOTER(JSValueType);
+
+JS_STATIC_ASSERT(sizeof(JSValueType) == 1);
+
+#if JS_BITS_PER_WORD == 32
+
+/* Remember to propagate changes to the C defines below. */
+JS_ENUM_HEADER(JSValueTag, uint32_t)
+{
+ JSVAL_TAG_CLEAR = 0xFFFFFF80,
+ JSVAL_TAG_INT32 = JSVAL_TAG_CLEAR | JSVAL_TYPE_INT32,
+ JSVAL_TAG_UNDEFINED = JSVAL_TAG_CLEAR | JSVAL_TYPE_UNDEFINED,
+ JSVAL_TAG_STRING = JSVAL_TAG_CLEAR | JSVAL_TYPE_STRING,
+ JSVAL_TAG_BOOLEAN = JSVAL_TAG_CLEAR | JSVAL_TYPE_BOOLEAN,
+ JSVAL_TAG_MAGIC = JSVAL_TAG_CLEAR | JSVAL_TYPE_MAGIC,
+ JSVAL_TAG_NULL = JSVAL_TAG_CLEAR | JSVAL_TYPE_NULL,
+ JSVAL_TAG_OBJECT = JSVAL_TAG_CLEAR | JSVAL_TYPE_OBJECT
+} JS_ENUM_FOOTER(JSValueTag);
+
+JS_STATIC_ASSERT(sizeof(JSValueTag) == 4);
+
+#elif JS_BITS_PER_WORD == 64
+
+/* Remember to propagate changes to the C defines below. */
+JS_ENUM_HEADER(JSValueTag, uint32_t)
+{
+ JSVAL_TAG_MAX_DOUBLE = 0x1FFF0,
+ JSVAL_TAG_INT32 = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_INT32,
+ JSVAL_TAG_UNDEFINED = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_UNDEFINED,
+ JSVAL_TAG_STRING = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_STRING,
+ JSVAL_TAG_BOOLEAN = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_BOOLEAN,
+ JSVAL_TAG_MAGIC = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_MAGIC,
+ JSVAL_TAG_NULL = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_NULL,
+ JSVAL_TAG_OBJECT = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_OBJECT
+} JS_ENUM_FOOTER(JSValueTag);
+
+JS_STATIC_ASSERT(sizeof(JSValueTag) == sizeof(uint32_t));
+
+JS_ENUM_HEADER(JSValueShiftedTag, uint64_t)
+{
+ JSVAL_SHIFTED_TAG_MAX_DOUBLE = ((((uint64_t)JSVAL_TAG_MAX_DOUBLE) << JSVAL_TAG_SHIFT) | 0xFFFFFFFF),
+ JSVAL_SHIFTED_TAG_INT32 = (((uint64_t)JSVAL_TAG_INT32) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_UNDEFINED = (((uint64_t)JSVAL_TAG_UNDEFINED) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_STRING = (((uint64_t)JSVAL_TAG_STRING) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_BOOLEAN = (((uint64_t)JSVAL_TAG_BOOLEAN) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_MAGIC = (((uint64_t)JSVAL_TAG_MAGIC) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_NULL = (((uint64_t)JSVAL_TAG_NULL) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_OBJECT = (((uint64_t)JSVAL_TAG_OBJECT) << JSVAL_TAG_SHIFT)
+} JS_ENUM_FOOTER(JSValueShiftedTag);
+
+JS_STATIC_ASSERT(sizeof(JSValueShiftedTag) == sizeof(uint64_t));
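+
+/*
+ * Sketch of the 64-bit encoding: a non-double value is
+ * (tag << JSVAL_TAG_SHIFT) | payload, so e.g. the int32 7 is stored as
+ * JSVAL_SHIFTED_TAG_INT32 | 7; any bit pattern at or below
+ * JSVAL_SHIFTED_TAG_MAX_DOUBLE is interpreted as a double.
+ */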
+
+#endif
+
+#else /* defined(__cplusplus) */
+
+typedef uint8_t JSValueType;
+#define JSVAL_TYPE_DOUBLE ((uint8_t)0x00)
+#define JSVAL_TYPE_INT32 ((uint8_t)0x01)
+#define JSVAL_TYPE_UNDEFINED ((uint8_t)0x02)
+#define JSVAL_TYPE_BOOLEAN ((uint8_t)0x03)
+#define JSVAL_TYPE_MAGIC ((uint8_t)0x04)
+#define JSVAL_TYPE_STRING ((uint8_t)0x05)
+#define JSVAL_TYPE_NULL ((uint8_t)0x06)
+#define JSVAL_TYPE_OBJECT ((uint8_t)0x07)
+#define JSVAL_TYPE_UNKNOWN ((uint8_t)0x20)
+
+#if JS_BITS_PER_WORD == 32
+
+typedef uint32_t JSValueTag;
+#define JSVAL_TAG_CLEAR ((uint32_t)(0xFFFFFF80))
+#define JSVAL_TAG_INT32 ((uint32_t)(JSVAL_TAG_CLEAR | JSVAL_TYPE_INT32))
+#define JSVAL_TAG_UNDEFINED ((uint32_t)(JSVAL_TAG_CLEAR | JSVAL_TYPE_UNDEFINED))
+#define JSVAL_TAG_STRING ((uint32_t)(JSVAL_TAG_CLEAR | JSVAL_TYPE_STRING))
+#define JSVAL_TAG_BOOLEAN ((uint32_t)(JSVAL_TAG_CLEAR | JSVAL_TYPE_BOOLEAN))
+#define JSVAL_TAG_MAGIC ((uint32_t)(JSVAL_TAG_CLEAR | JSVAL_TYPE_MAGIC))
+#define JSVAL_TAG_NULL ((uint32_t)(JSVAL_TAG_CLEAR | JSVAL_TYPE_NULL))
+#define JSVAL_TAG_OBJECT ((uint32_t)(JSVAL_TAG_CLEAR | JSVAL_TYPE_OBJECT))
+
+#elif JS_BITS_PER_WORD == 64
+
+typedef uint32_t JSValueTag;
+#define JSVAL_TAG_MAX_DOUBLE ((uint32_t)(0x1FFF0))
+#define JSVAL_TAG_INT32 (uint32_t)(JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_INT32)
+#define JSVAL_TAG_UNDEFINED (uint32_t)(JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_UNDEFINED)
+#define JSVAL_TAG_STRING (uint32_t)(JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_STRING)
+#define JSVAL_TAG_BOOLEAN (uint32_t)(JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_BOOLEAN)
+#define JSVAL_TAG_MAGIC (uint32_t)(JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_MAGIC)
+#define JSVAL_TAG_NULL (uint32_t)(JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_NULL)
+#define JSVAL_TAG_OBJECT (uint32_t)(JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_OBJECT)
+
+typedef uint64_t JSValueShiftedTag;
+#define JSVAL_SHIFTED_TAG_MAX_DOUBLE ((((uint64_t)JSVAL_TAG_MAX_DOUBLE) << JSVAL_TAG_SHIFT) | 0xFFFFFFFF)
+#define JSVAL_SHIFTED_TAG_INT32 (((uint64_t)JSVAL_TAG_INT32) << JSVAL_TAG_SHIFT)
+#define JSVAL_SHIFTED_TAG_UNDEFINED (((uint64_t)JSVAL_TAG_UNDEFINED) << JSVAL_TAG_SHIFT)
+#define JSVAL_SHIFTED_TAG_STRING (((uint64_t)JSVAL_TAG_STRING) << JSVAL_TAG_SHIFT)
+#define JSVAL_SHIFTED_TAG_BOOLEAN (((uint64_t)JSVAL_TAG_BOOLEAN) << JSVAL_TAG_SHIFT)
+#define JSVAL_SHIFTED_TAG_MAGIC (((uint64_t)JSVAL_TAG_MAGIC) << JSVAL_TAG_SHIFT)
+#define JSVAL_SHIFTED_TAG_NULL (((uint64_t)JSVAL_TAG_NULL) << JSVAL_TAG_SHIFT)
+#define JSVAL_SHIFTED_TAG_OBJECT (((uint64_t)JSVAL_TAG_OBJECT) << JSVAL_TAG_SHIFT)
+
+#endif /* JS_BITS_PER_WORD */
+#endif /* defined(__cplusplus) && !defined(__SUNPRO_CC) && !defined(__xlC__) */
+
+#define JSVAL_LOWER_INCL_TYPE_OF_OBJ_OR_NULL_SET JSVAL_TYPE_NULL
+#define JSVAL_UPPER_EXCL_TYPE_OF_PRIMITIVE_SET JSVAL_TYPE_OBJECT
+#define JSVAL_UPPER_INCL_TYPE_OF_NUMBER_SET JSVAL_TYPE_INT32
+#define JSVAL_LOWER_INCL_TYPE_OF_PTR_PAYLOAD_SET JSVAL_TYPE_MAGIC
+
+#if JS_BITS_PER_WORD == 32
+
+#define JSVAL_TYPE_TO_TAG(type) ((JSValueTag)(JSVAL_TAG_CLEAR | (type)))
+
+#define JSVAL_LOWER_INCL_TAG_OF_OBJ_OR_NULL_SET JSVAL_TAG_NULL
+#define JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET JSVAL_TAG_OBJECT
+#define JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET JSVAL_TAG_INT32
+#define JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET JSVAL_TAG_STRING
+
+#elif JS_BITS_PER_WORD == 64
+
+#define JSVAL_PAYLOAD_MASK 0x00007FFFFFFFFFFFLL
+#define JSVAL_TAG_MASK 0xFFFF800000000000LL
+#define JSVAL_TYPE_TO_TAG(type) ((JSValueTag)(JSVAL_TAG_MAX_DOUBLE | (type)))
+#define JSVAL_TYPE_TO_SHIFTED_TAG(type) (((uint64_t)JSVAL_TYPE_TO_TAG(type)) << JSVAL_TAG_SHIFT)
+
+#define JSVAL_LOWER_INCL_TAG_OF_OBJ_OR_NULL_SET JSVAL_TAG_NULL
+#define JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET JSVAL_TAG_OBJECT
+#define JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET JSVAL_TAG_INT32
+#define JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET JSVAL_TAG_STRING
+
+#define JSVAL_LOWER_INCL_SHIFTED_TAG_OF_OBJ_OR_NULL_SET JSVAL_SHIFTED_TAG_NULL
+#define JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_PRIMITIVE_SET JSVAL_SHIFTED_TAG_OBJECT
+#define JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_NUMBER_SET JSVAL_SHIFTED_TAG_UNDEFINED
+#define JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET JSVAL_SHIFTED_TAG_STRING
+
+#endif /* JS_BITS_PER_WORD */
+
+typedef enum JSWhyMagic
+{
+ JS_ELEMENTS_HOLE, /* a hole in a native object's elements */
+ JS_NATIVE_ENUMERATE, /* indicates that a custom enumerate hook forwarded
+ * to JS_EnumerateState, which really means the object can be
+ * enumerated like a native object. */
+ JS_NO_ITER_VALUE, /* there is not a pending iterator value */
+ JS_GENERATOR_CLOSING, /* exception value thrown when closing a generator */
+ JS_NO_CONSTANT, /* compiler sentinel value */
+ JS_THIS_POISON, /* used in debug builds to catch tracing errors */
+ JS_ARG_POISON, /* used in debug builds to catch tracing errors */
+ JS_SERIALIZE_NO_NODE, /* an empty subnode in the AST serializer */
+ JS_LAZY_ARGUMENTS, /* lazy arguments value on the stack */
+ JS_OPTIMIZED_ARGUMENTS, /* optimized-away 'arguments' value */
+ JS_IS_CONSTRUCTING, /* magic value passed to natives to indicate construction */
+ JS_OVERWRITTEN_CALLEE, /* arguments.callee has been overwritten */
+ JS_FORWARD_TO_CALL_OBJECT, /* args object element stored in call object */
+ JS_BLOCK_NEEDS_CLONE, /* value of static block object slot */
+ JS_HASH_KEY_EMPTY, /* see class js::HashableValue */
+ JS_ION_ERROR, /* error while running Ion code */
+ JS_ION_BAILOUT, /* status code to signal EnterIon will OSR into Interpret */
+ JS_GENERIC_MAGIC /* for local use */
+} JSWhyMagic;
+
+#if defined(IS_LITTLE_ENDIAN)
+# if JS_BITS_PER_WORD == 32
+typedef union jsval_layout
+{
+ uint64_t asBits;
+ struct {
+ union {
+ int32_t i32;
+ uint32_t u32;
+ JSBool boo;
+ JSString *str;
+ JSObject *obj;
+ void *ptr;
+ JSWhyMagic why;
+ size_t word;
+ uintptr_t uintptr;
+ } payload;
+ JSValueTag tag;
+ } s;
+ double asDouble;
+ void *asPtr;
+} JSVAL_ALIGNMENT jsval_layout;
+# elif JS_BITS_PER_WORD == 64
+typedef union jsval_layout
+{
+ uint64_t asBits;
+#if (!defined(_WIN64) && defined(__cplusplus))
+ /* MSVC does not pack these correctly :-( */
+ struct {
+ uint64_t payload47 : 47;
+ JSValueTag tag : 17;
+ } debugView;
+#endif
+ struct {
+ union {
+ int32_t i32;
+ uint32_t u32;
+ JSWhyMagic why;
+ } payload;
+ } s;
+ double asDouble;
+ void *asPtr;
+ size_t asWord;
+ uintptr_t asUIntPtr;
+} JSVAL_ALIGNMENT jsval_layout;
+# endif /* JS_BITS_PER_WORD */
+#else /* defined(IS_LITTLE_ENDIAN) */
+# if JS_BITS_PER_WORD == 32
+typedef union jsval_layout
+{
+ uint64_t asBits;
+ struct {
+ JSValueTag tag;
+ union {
+ int32_t i32;
+ uint32_t u32;
+ JSBool boo;
+ JSString *str;
+ JSObject *obj;
+ void *ptr;
+ JSWhyMagic why;
+ size_t word;
+ uintptr_t uintptr;
+ } payload;
+ } s;
+ double asDouble;
+ void *asPtr;
+} JSVAL_ALIGNMENT jsval_layout;
+# elif JS_BITS_PER_WORD == 64
+typedef union jsval_layout
+{
+ uint64_t asBits;
+ struct {
+ JSValueTag tag : 17;
+ uint64_t payload47 : 47;
+ } debugView;
+ struct {
+ uint32_t padding;
+ union {
+ int32_t i32;
+ uint32_t u32;
+ JSWhyMagic why;
+ } payload;
+ } s;
+ double asDouble;
+ void *asPtr;
+ size_t asWord;
+ uintptr_t asUIntPtr;
+} JSVAL_ALIGNMENT jsval_layout;
+# endif /* JS_BITS_PER_WORD */
+#endif /* defined(IS_LITTLE_ENDIAN) */
+
+JS_STATIC_ASSERT(sizeof(jsval_layout) == 8);
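+
+/*
+ * Illustrative sketch of the 64-bit packing above (see the debugView
+ * bitfields): a 17-bit tag sits over a 47-bit payload, so a boxed int32
+ * looks like
+ *
+ *   jsval_layout l;
+ *   l.asBits = (((uint64_t)JSVAL_TAG_INT32) << JSVAL_TAG_SHIFT)
+ *              | (uint32_t)42;    // tag in bits 47..63, payload == 42
+ *
+ * while doubles are stored unshifted in asBits, using bit patterns no
+ * shifted tag can take (every tag reads as a NaN when viewed as a double).
+ */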
+
+#if JS_BITS_PER_WORD == 32
+
+/*
+ * N.B. GCC, in some but not all cases, chooses to emit signed comparison of
+ * JSValueTag even though its underlying type has been forced to be uint32_t.
+ * Thus, all comparisons should explicitly cast operands to uint32_t.
+ */
+
+static inline jsval_layout
+BUILD_JSVAL(JSValueTag tag, uint32_t payload)
+{
+ jsval_layout l;
+ l.asBits = (((uint64_t)(uint32_t)tag) << 32) | payload;
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_DOUBLE_IMPL(jsval_layout l)
+{
+ return (uint32_t)l.s.tag <= (uint32_t)JSVAL_TAG_CLEAR;
+}
+
+static inline jsval_layout
+DOUBLE_TO_JSVAL_IMPL(double d)
+{
+ jsval_layout l;
+ l.asDouble = d;
+ MOZ_ASSERT(JSVAL_IS_DOUBLE_IMPL(l));
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_INT32_IMPL(jsval_layout l)
+{
+ return l.s.tag == JSVAL_TAG_INT32;
+}
+
+static inline int32_t
+JSVAL_TO_INT32_IMPL(jsval_layout l)
+{
+ return l.s.payload.i32;
+}
+
+static inline jsval_layout
+INT32_TO_JSVAL_IMPL(int32_t i)
+{
+ jsval_layout l;
+ l.s.tag = JSVAL_TAG_INT32;
+ l.s.payload.i32 = i;
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_NUMBER_IMPL(jsval_layout l)
+{
+ JSValueTag tag = l.s.tag;
+ MOZ_ASSERT(tag != JSVAL_TAG_CLEAR);
+ return (uint32_t)tag <= (uint32_t)JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET;
+}
+
+static inline JSBool
+JSVAL_IS_UNDEFINED_IMPL(jsval_layout l)
+{
+ return l.s.tag == JSVAL_TAG_UNDEFINED;
+}
+
+static inline JSBool
+JSVAL_IS_STRING_IMPL(jsval_layout l)
+{
+ return l.s.tag == JSVAL_TAG_STRING;
+}
+
+static inline jsval_layout
+STRING_TO_JSVAL_IMPL(JSString *str)
+{
+ jsval_layout l;
+ MOZ_ASSERT(str);
+ l.s.tag = JSVAL_TAG_STRING;
+ l.s.payload.str = str;
+ return l;
+}
+
+static inline JSString *
+JSVAL_TO_STRING_IMPL(jsval_layout l)
+{
+ return l.s.payload.str;
+}
+
+static inline JSBool
+JSVAL_IS_BOOLEAN_IMPL(jsval_layout l)
+{
+ return l.s.tag == JSVAL_TAG_BOOLEAN;
+}
+
+static inline JSBool
+JSVAL_TO_BOOLEAN_IMPL(jsval_layout l)
+{
+ return l.s.payload.boo;
+}
+
+static inline jsval_layout
+BOOLEAN_TO_JSVAL_IMPL(JSBool b)
+{
+ jsval_layout l;
+ MOZ_ASSERT(b == JS_TRUE || b == JS_FALSE);
+ l.s.tag = JSVAL_TAG_BOOLEAN;
+ l.s.payload.boo = b;
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_MAGIC_IMPL(jsval_layout l)
+{
+ return l.s.tag == JSVAL_TAG_MAGIC;
+}
+
+static inline JSBool
+JSVAL_IS_OBJECT_IMPL(jsval_layout l)
+{
+ return l.s.tag == JSVAL_TAG_OBJECT;
+}
+
+static inline JSBool
+JSVAL_IS_PRIMITIVE_IMPL(jsval_layout l)
+{
+ return (uint32_t)l.s.tag < (uint32_t)JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET;
+}
+
+static inline JSBool
+JSVAL_IS_OBJECT_OR_NULL_IMPL(jsval_layout l)
+{
+ MOZ_ASSERT((uint32_t)l.s.tag <= (uint32_t)JSVAL_TAG_OBJECT);
+ return (uint32_t)l.s.tag >= (uint32_t)JSVAL_LOWER_INCL_TAG_OF_OBJ_OR_NULL_SET;
+}
+
+static inline JSObject *
+JSVAL_TO_OBJECT_IMPL(jsval_layout l)
+{
+ return l.s.payload.obj;
+}
+
+static inline jsval_layout
+OBJECT_TO_JSVAL_IMPL(JSObject *obj)
+{
+ jsval_layout l;
+ MOZ_ASSERT(obj);
+ l.s.tag = JSVAL_TAG_OBJECT;
+ l.s.payload.obj = obj;
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_NULL_IMPL(jsval_layout l)
+{
+ return l.s.tag == JSVAL_TAG_NULL;
+}
+
+static inline jsval_layout
+PRIVATE_PTR_TO_JSVAL_IMPL(void *ptr)
+{
+ jsval_layout l;
+ MOZ_ASSERT(((uint32_t)ptr & 1) == 0);
+ l.s.tag = (JSValueTag)0;
+ l.s.payload.ptr = ptr;
+ MOZ_ASSERT(JSVAL_IS_DOUBLE_IMPL(l));
+ return l;
+}
+
+static inline void *
+JSVAL_TO_PRIVATE_PTR_IMPL(jsval_layout l)
+{
+ return l.s.payload.ptr;
+}
+
+static inline JSBool
+JSVAL_IS_GCTHING_IMPL(jsval_layout l)
+{
+ /* gcc sometimes generates signed < without explicit casts. */
+ return (uint32_t)l.s.tag >= (uint32_t)JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET;
+}
+
+static inline void *
+JSVAL_TO_GCTHING_IMPL(jsval_layout l)
+{
+ return l.s.payload.ptr;
+}
+
+static inline JSBool
+JSVAL_IS_TRACEABLE_IMPL(jsval_layout l)
+{
+ return l.s.tag == JSVAL_TAG_STRING || l.s.tag == JSVAL_TAG_OBJECT;
+}
+
+static inline uint32_t
+JSVAL_TRACE_KIND_IMPL(jsval_layout l)
+{
+ return (uint32_t)(JSBool)JSVAL_IS_STRING_IMPL(l);
+}
+
+static inline JSBool
+JSVAL_IS_SPECIFIC_INT32_IMPL(jsval_layout l, int32_t i32)
+{
+ return l.s.tag == JSVAL_TAG_INT32 && l.s.payload.i32 == i32;
+}
+
+static inline JSBool
+JSVAL_IS_SPECIFIC_BOOLEAN(jsval_layout l, JSBool b)
+{
+ return (l.s.tag == JSVAL_TAG_BOOLEAN) && (l.s.payload.boo == b);
+}
+
+static inline jsval_layout
+MAGIC_TO_JSVAL_IMPL(JSWhyMagic why)
+{
+ jsval_layout l;
+ l.s.tag = JSVAL_TAG_MAGIC;
+ l.s.payload.why = why;
+ return l;
+}
+
+static inline JSBool
+JSVAL_SAME_TYPE_IMPL(jsval_layout lhs, jsval_layout rhs)
+{
+ JSValueTag ltag = lhs.s.tag, rtag = rhs.s.tag;
+ return ltag == rtag || (ltag < JSVAL_TAG_CLEAR && rtag < JSVAL_TAG_CLEAR);
+}
+
+static inline JSValueType
+JSVAL_EXTRACT_NON_DOUBLE_TYPE_IMPL(jsval_layout l)
+{
+ uint32_t type = l.s.tag & 0xF;
+ MOZ_ASSERT(type > JSVAL_TYPE_DOUBLE);
+ return (JSValueType)type;
+}
+
+#elif JS_BITS_PER_WORD == 64
+
+static inline jsval_layout
+BUILD_JSVAL(JSValueTag tag, uint64_t payload)
+{
+ jsval_layout l;
+ l.asBits = (((uint64_t)(uint32_t)tag) << JSVAL_TAG_SHIFT) | payload;
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_DOUBLE_IMPL(jsval_layout l)
+{
+ return l.asBits <= JSVAL_SHIFTED_TAG_MAX_DOUBLE;
+}
+
+static inline jsval_layout
+DOUBLE_TO_JSVAL_IMPL(double d)
+{
+ jsval_layout l;
+ l.asDouble = d;
+ MOZ_ASSERT(l.asBits <= JSVAL_SHIFTED_TAG_MAX_DOUBLE);
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_INT32_IMPL(jsval_layout l)
+{
+ return (uint32_t)(l.asBits >> JSVAL_TAG_SHIFT) == JSVAL_TAG_INT32;
+}
+
+static inline int32_t
+JSVAL_TO_INT32_IMPL(jsval_layout l)
+{
+ return (int32_t)l.asBits;
+}
+
+static inline jsval_layout
+INT32_TO_JSVAL_IMPL(int32_t i32)
+{
+ jsval_layout l;
+ l.asBits = ((uint64_t)(uint32_t)i32) | JSVAL_SHIFTED_TAG_INT32;
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_NUMBER_IMPL(jsval_layout l)
+{
+ return l.asBits < JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_NUMBER_SET;
+}
+
+static inline JSBool
+JSVAL_IS_UNDEFINED_IMPL(jsval_layout l)
+{
+ return l.asBits == JSVAL_SHIFTED_TAG_UNDEFINED;
+}
+
+static inline JSBool
+JSVAL_IS_STRING_IMPL(jsval_layout l)
+{
+ return (uint32_t)(l.asBits >> JSVAL_TAG_SHIFT) == JSVAL_TAG_STRING;
+}
+
+static inline jsval_layout
+STRING_TO_JSVAL_IMPL(JSString *str)
+{
+ jsval_layout l;
+ uint64_t strBits = (uint64_t)str;
+ MOZ_ASSERT(str);
+ MOZ_ASSERT((strBits >> JSVAL_TAG_SHIFT) == 0);
+ l.asBits = strBits | JSVAL_SHIFTED_TAG_STRING;
+ return l;
+}
+
+static inline JSString *
+JSVAL_TO_STRING_IMPL(jsval_layout l)
+{
+ return (JSString *)(l.asBits & JSVAL_PAYLOAD_MASK);
+}
+
+static inline JSBool
+JSVAL_IS_BOOLEAN_IMPL(jsval_layout l)
+{
+ return (uint32_t)(l.asBits >> JSVAL_TAG_SHIFT) == JSVAL_TAG_BOOLEAN;
+}
+
+static inline JSBool
+JSVAL_TO_BOOLEAN_IMPL(jsval_layout l)
+{
+ return (JSBool)l.asBits;
+}
+
+static inline jsval_layout
+BOOLEAN_TO_JSVAL_IMPL(JSBool b)
+{
+ jsval_layout l;
+ MOZ_ASSERT(b == JS_TRUE || b == JS_FALSE);
+ l.asBits = ((uint64_t)(uint32_t)b) | JSVAL_SHIFTED_TAG_BOOLEAN;
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_MAGIC_IMPL(jsval_layout l)
+{
+ return (l.asBits >> JSVAL_TAG_SHIFT) == JSVAL_TAG_MAGIC;
+}
+
+static inline JSBool
+JSVAL_IS_PRIMITIVE_IMPL(jsval_layout l)
+{
+ return l.asBits < JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_PRIMITIVE_SET;
+}
+
+static inline JSBool
+JSVAL_IS_OBJECT_IMPL(jsval_layout l)
+{
+ MOZ_ASSERT((l.asBits >> JSVAL_TAG_SHIFT) <= JSVAL_SHIFTED_TAG_OBJECT);
+ return l.asBits >= JSVAL_SHIFTED_TAG_OBJECT;
+}
+
+static inline JSBool
+JSVAL_IS_OBJECT_OR_NULL_IMPL(jsval_layout l)
+{
+ MOZ_ASSERT((l.asBits >> JSVAL_TAG_SHIFT) <= JSVAL_TAG_OBJECT);
+ return l.asBits >= JSVAL_LOWER_INCL_SHIFTED_TAG_OF_OBJ_OR_NULL_SET;
+}
+
+static inline JSObject *
+JSVAL_TO_OBJECT_IMPL(jsval_layout l)
+{
+ uint64_t ptrBits = l.asBits & JSVAL_PAYLOAD_MASK;
+ MOZ_ASSERT((ptrBits & 0x7) == 0);
+ return (JSObject *)ptrBits;
+}
+
+static inline jsval_layout
+OBJECT_TO_JSVAL_IMPL(JSObject *obj)
+{
+ jsval_layout l;
+ uint64_t objBits = (uint64_t)obj;
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT((objBits >> JSVAL_TAG_SHIFT) == 0);
+ l.asBits = objBits | JSVAL_SHIFTED_TAG_OBJECT;
+ return l;
+}
+
+static inline JSBool
+JSVAL_IS_NULL_IMPL(jsval_layout l)
+{
+ return l.asBits == JSVAL_SHIFTED_TAG_NULL;
+}
+
+static inline JSBool
+JSVAL_IS_GCTHING_IMPL(jsval_layout l)
+{
+ return l.asBits >= JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET;
+}
+
+static inline void *
+JSVAL_TO_GCTHING_IMPL(jsval_layout l)
+{
+ uint64_t ptrBits = l.asBits & JSVAL_PAYLOAD_MASK;
+ MOZ_ASSERT((ptrBits & 0x7) == 0);
+ return (void *)ptrBits;
+}
+
+static inline JSBool
+JSVAL_IS_TRACEABLE_IMPL(jsval_layout l)
+{
+ return JSVAL_IS_GCTHING_IMPL(l) && !JSVAL_IS_NULL_IMPL(l);
+}
+
+static inline uint32_t
+JSVAL_TRACE_KIND_IMPL(jsval_layout l)
+{
+ return (uint32_t)(JSBool)!(JSVAL_IS_OBJECT_IMPL(l));
+}
+
+static inline jsval_layout
+PRIVATE_PTR_TO_JSVAL_IMPL(void *ptr)
+{
+ jsval_layout l;
+ uint64_t ptrBits = (uint64_t)ptr;
+ MOZ_ASSERT((ptrBits & 1) == 0);
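+    /*
+     * Shifting the (even) pointer right by one clears the top bit, so the
+     * result reads as a non-negative double bit pattern and can never
+     * collide with a shifted tag; JSVAL_TO_PRIVATE_PTR_IMPL shifts back.
+     */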
+ l.asBits = ptrBits >> 1;
+ MOZ_ASSERT(JSVAL_IS_DOUBLE_IMPL(l));
+ return l;
+}
+
+static inline void *
+JSVAL_TO_PRIVATE_PTR_IMPL(jsval_layout l)
+{
+ MOZ_ASSERT((l.asBits & 0x8000000000000000LL) == 0);
+ return (void *)(l.asBits << 1);
+}
+
+static inline JSBool
+JSVAL_IS_SPECIFIC_INT32_IMPL(jsval_layout l, int32_t i32)
+{
+ return l.asBits == (((uint64_t)(uint32_t)i32) | JSVAL_SHIFTED_TAG_INT32);
+}
+
+static inline JSBool
+JSVAL_IS_SPECIFIC_BOOLEAN(jsval_layout l, JSBool b)
+{
+ return l.asBits == (((uint64_t)(uint32_t)b) | JSVAL_SHIFTED_TAG_BOOLEAN);
+}
+
+static inline jsval_layout
+MAGIC_TO_JSVAL_IMPL(JSWhyMagic why)
+{
+ jsval_layout l;
+ l.asBits = ((uint64_t)(uint32_t)why) | JSVAL_SHIFTED_TAG_MAGIC;
+ return l;
+}
+
+static inline JSBool
+JSVAL_SAME_TYPE_IMPL(jsval_layout lhs, jsval_layout rhs)
+{
+ uint64_t lbits = lhs.asBits, rbits = rhs.asBits;
+ return (lbits <= JSVAL_SHIFTED_TAG_MAX_DOUBLE && rbits <= JSVAL_SHIFTED_TAG_MAX_DOUBLE) ||
+ (((lbits ^ rbits) & 0xFFFF800000000000LL) == 0);
+}
+
+static inline JSValueType
+JSVAL_EXTRACT_NON_DOUBLE_TYPE_IMPL(jsval_layout l)
+{
+ uint64_t type = (l.asBits >> JSVAL_TAG_SHIFT) & 0xF;
+ MOZ_ASSERT(type > JSVAL_TYPE_DOUBLE);
+ return (JSValueType)type;
+}
+
+#endif /* JS_BITS_PER_WORD */
+
+static inline jsval_layout JSVAL_TO_IMPL(JS::Value v);
+static inline JS::Value IMPL_TO_JSVAL(jsval_layout l);
+
+namespace JS {
+
+/**
+ * Returns a generic quiet NaN value, with all payload bits set to zero.
+ *
+ * Among other properties, this NaN's bit pattern conforms to JS::Value's
+ * bit pattern restrictions.
+ */
+static MOZ_ALWAYS_INLINE double
+GenericNaN()
+{
+ return mozilla::SpecificNaN(0, 0x8000000000000ULL);
+}
+
+static inline double
+CanonicalizeNaN(double d)
+{
+ if (MOZ_UNLIKELY(mozilla::IsNaN(d)))
+ return GenericNaN();
+ return d;
+}
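+
+/*
+ * Illustrative sketch: every NaN, whatever its payload, canonicalizes to the
+ * single pattern produced by GenericNaN():
+ *
+ *   double d = 0.0 / 0.0;              // some NaN
+ *   double c = CanonicalizeNaN(d);
+ *   // c now has the quiet-NaN bit pattern 0x7FF8000000000000, which is
+ *   // always safe to store in a JS::Value.
+ */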
+
+/*
+ * JS::Value is the interface for a single JavaScript Engine value. A few
+ * general notes on JS::Value:
+ *
+ * - JS::Value has setX() and isX() members for X in
+ *
+ * { Int32, Double, String, Boolean, Undefined, Null, Object, Magic }
+ *
+ * JS::Value also contains toX() for each of the non-singleton types.
+ *
+ * - Magic is a singleton type whose payload contains a JSWhyMagic "reason" for
+ * the magic value. By providing JSWhyMagic values when creating and checking
+ * for magic values, it is possible to assert, at runtime, that only magic
+ * values with the expected reason flow through a particular value. For
+ * example, if cx->exception has a magic value, the reason must be
+ * JS_GENERATOR_CLOSING.
+ *
+ * - The JS::Value operations are preferred. The JSVAL_* operations remain for
+ * compatibility; they may be removed at some point. These operations mostly
+ * provide similar functionality. But there are a few key differences. One
+ * is that JS::Value gives null a separate type. Thus
+ *
+ * JSVAL_IS_OBJECT(v) === v.isObjectOrNull()
+ * !JSVAL_IS_PRIMITIVE(v) === v.isObject()
+ *
+ * Also, to help prevent mistakenly boxing a nullable JSObject* as an object,
+ * Value::setObject takes a JSObject&. (Conversely, Value::asObject returns a
+ * JSObject&.) A convenience member Value::setObjectOrNull is provided.
+ *
+ * - JSVAL_VOID is the same as the singleton value of the Undefined type.
+ *
+ * - Note that JS::Value is 8 bytes on both 32- and 64-bit architectures.
+ *   Thus, on 32-bit systems user code should avoid copying jsval/JS::Value
+ *   as much as possible, preferring to pass by const Value &.
+ */
+class Value
+{
+ public:
+ /*
+     * N.B. the default constructor leaves Value uninitialized. Adding a default
+ * constructor prevents Value from being stored in a union.
+ */
+
+ /*** Mutators ***/
+
+ void setNull() {
+ data.asBits = BUILD_JSVAL(JSVAL_TAG_NULL, 0).asBits;
+ }
+
+ void setUndefined() {
+ data.asBits = BUILD_JSVAL(JSVAL_TAG_UNDEFINED, 0).asBits;
+ }
+
+ void setInt32(int32_t i) {
+ data = INT32_TO_JSVAL_IMPL(i);
+ }
+
+ int32_t &getInt32Ref() {
+ MOZ_ASSERT(isInt32());
+ return data.s.payload.i32;
+ }
+
+ void setDouble(double d) {
+ data = DOUBLE_TO_JSVAL_IMPL(d);
+ }
+
+ double &getDoubleRef() {
+ MOZ_ASSERT(isDouble());
+ return data.asDouble;
+ }
+
+ void setString(JSString *str) {
+ MOZ_ASSERT(!IsPoisonedPtr(str));
+ data = STRING_TO_JSVAL_IMPL(str);
+ }
+
+ void setString(const JS::Anchor<JSString *> &str) {
+ setString(str.get());
+ }
+
+ void setObject(JSObject &obj) {
+ MOZ_ASSERT(!IsPoisonedPtr(&obj));
+ data = OBJECT_TO_JSVAL_IMPL(&obj);
+ }
+
+ void setBoolean(bool b) {
+ data = BOOLEAN_TO_JSVAL_IMPL(b);
+ }
+
+ void setMagic(JSWhyMagic why) {
+ data = MAGIC_TO_JSVAL_IMPL(why);
+ }
+
+ bool setNumber(uint32_t ui) {
+ if (ui > JSVAL_INT_MAX) {
+ setDouble((double)ui);
+ return false;
+ } else {
+ setInt32((int32_t)ui);
+ return true;
+ }
+ }
+
+ bool setNumber(double d) {
+ int32_t i;
+ if (mozilla::DoubleIsInt32(d, &i)) {
+ setInt32(i);
+ return true;
+ }
+
+ setDouble(d);
+ return false;
+ }
+
+ void setObjectOrNull(JSObject *arg) {
+ if (arg)
+ setObject(*arg);
+ else
+ setNull();
+ }
+
+ void swap(Value &rhs) {
+ uint64_t tmp = rhs.data.asBits;
+ rhs.data.asBits = data.asBits;
+ data.asBits = tmp;
+ }
+
+ /*** Value type queries ***/
+
+ bool isUndefined() const {
+ return JSVAL_IS_UNDEFINED_IMPL(data);
+ }
+
+ bool isNull() const {
+ return JSVAL_IS_NULL_IMPL(data);
+ }
+
+ bool isNullOrUndefined() const {
+ return isNull() || isUndefined();
+ }
+
+ bool isInt32() const {
+ return JSVAL_IS_INT32_IMPL(data);
+ }
+
+ bool isInt32(int32_t i32) const {
+ return JSVAL_IS_SPECIFIC_INT32_IMPL(data, i32);
+ }
+
+ bool isDouble() const {
+ return JSVAL_IS_DOUBLE_IMPL(data);
+ }
+
+ bool isNumber() const {
+ return JSVAL_IS_NUMBER_IMPL(data);
+ }
+
+ bool isString() const {
+ return JSVAL_IS_STRING_IMPL(data);
+ }
+
+ bool isObject() const {
+ return JSVAL_IS_OBJECT_IMPL(data);
+ }
+
+ bool isPrimitive() const {
+ return JSVAL_IS_PRIMITIVE_IMPL(data);
+ }
+
+ bool isObjectOrNull() const {
+ return JSVAL_IS_OBJECT_OR_NULL_IMPL(data);
+ }
+
+ bool isGCThing() const {
+ return JSVAL_IS_GCTHING_IMPL(data);
+ }
+
+ bool isBoolean() const {
+ return JSVAL_IS_BOOLEAN_IMPL(data);
+ }
+
+ bool isTrue() const {
+ return JSVAL_IS_SPECIFIC_BOOLEAN(data, true);
+ }
+
+ bool isFalse() const {
+ return JSVAL_IS_SPECIFIC_BOOLEAN(data, false);
+ }
+
+ bool isMagic() const {
+ return JSVAL_IS_MAGIC_IMPL(data);
+ }
+
+ bool isMagic(JSWhyMagic why) const {
+ MOZ_ASSERT_IF(isMagic(), data.s.payload.why == why);
+ return JSVAL_IS_MAGIC_IMPL(data);
+ }
+
+ bool isMarkable() const {
+ return JSVAL_IS_TRACEABLE_IMPL(data);
+ }
+
+ JSGCTraceKind gcKind() const {
+ MOZ_ASSERT(isMarkable());
+ return JSGCTraceKind(JSVAL_TRACE_KIND_IMPL(data));
+ }
+
+ JSWhyMagic whyMagic() const {
+ MOZ_ASSERT(isMagic());
+ return data.s.payload.why;
+ }
+
+ /*** Comparison ***/
+
+ bool operator==(const Value &rhs) const {
+ return data.asBits == rhs.data.asBits;
+ }
+
+ bool operator!=(const Value &rhs) const {
+ return data.asBits != rhs.data.asBits;
+ }
+
+ friend inline bool SameType(const Value &lhs, const Value &rhs);
+
+ /*** Extract the value's typed payload ***/
+
+ int32_t toInt32() const {
+ MOZ_ASSERT(isInt32());
+ return JSVAL_TO_INT32_IMPL(data);
+ }
+
+ double toDouble() const {
+ MOZ_ASSERT(isDouble());
+ return data.asDouble;
+ }
+
+ double toNumber() const {
+ MOZ_ASSERT(isNumber());
+ return isDouble() ? toDouble() : double(toInt32());
+ }
+
+ JSString *toString() const {
+ MOZ_ASSERT(isString());
+ return JSVAL_TO_STRING_IMPL(data);
+ }
+
+ JSObject &toObject() const {
+ MOZ_ASSERT(isObject());
+ return *JSVAL_TO_OBJECT_IMPL(data);
+ }
+
+ JSObject *toObjectOrNull() const {
+ MOZ_ASSERT(isObjectOrNull());
+ return JSVAL_TO_OBJECT_IMPL(data);
+ }
+
+ void *toGCThing() const {
+ MOZ_ASSERT(isGCThing());
+ return JSVAL_TO_GCTHING_IMPL(data);
+ }
+
+ bool toBoolean() const {
+ MOZ_ASSERT(isBoolean());
+ return JSVAL_TO_BOOLEAN_IMPL(data);
+ }
+
+ uint32_t payloadAsRawUint32() const {
+ MOZ_ASSERT(!isDouble());
+ return data.s.payload.u32;
+ }
+
+ uint64_t asRawBits() const {
+ return data.asBits;
+ }
+
+ JSValueType extractNonDoubleType() const {
+ return JSVAL_EXTRACT_NON_DOUBLE_TYPE_IMPL(data);
+ }
+
+ /*
+ * Private API
+ *
+ * Private setters/getters allow the caller to read/write arbitrary types
+     * that fit in the 64-bit payload. It is the caller's responsibility, after
+     * storing to a value with setPrivateX, to read it back only with the
+     * matching getPrivateX. Private values are given a type which ensures
+     * they are not marked.
+ */
+
+ void setPrivate(void *ptr) {
+ data = PRIVATE_PTR_TO_JSVAL_IMPL(ptr);
+ }
+
+ void *toPrivate() const {
+ MOZ_ASSERT(JSVAL_IS_DOUBLE_IMPL(data));
+ return JSVAL_TO_PRIVATE_PTR_IMPL(data);
+ }
+
+ void setPrivateUint32(uint32_t ui) {
+ MOZ_ASSERT(uint32_t(int32_t(ui)) == ui);
+ setInt32(int32_t(ui));
+ }
+
+ uint32_t toPrivateUint32() const {
+ return uint32_t(toInt32());
+ }
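+
+    /*
+     * Illustrative sketch (MyData is a hypothetical caller-defined type;
+     * the pointer must be at least two-byte aligned):
+     *
+     *   MyData *data = GetMyData();        // hypothetical source
+     *   v.setPrivate(data);                // boxed as a double-typed value
+     *   MyData *back = static_cast<MyData *>(v.toPrivate());
+     */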
+
+ /*
+ * An unmarked value is just a void* cast as a Value. Thus, the Value is
+ * not safe for GC and must not be marked. This API avoids raw casts
+ * and the ensuing strict-aliasing warnings.
+ */
+
+ void setUnmarkedPtr(void *ptr) {
+ data.asPtr = ptr;
+ }
+
+ void *toUnmarkedPtr() const {
+ return data.asPtr;
+ }
+
+ const size_t *payloadWord() const {
+#if JS_BITS_PER_WORD == 32
+ return &data.s.payload.word;
+#elif JS_BITS_PER_WORD == 64
+ return &data.asWord;
+#endif
+ }
+
+ const uintptr_t *payloadUIntPtr() const {
+#if JS_BITS_PER_WORD == 32
+ return &data.s.payload.uintptr;
+#elif JS_BITS_PER_WORD == 64
+ return &data.asUIntPtr;
+#endif
+ }
+
+#if !defined(_MSC_VER) && !defined(__sparc)
+ // Value must be POD so that MSVC will pass it by value and not in memory
+ // (bug 689101); the same is true for SPARC as well (bug 737344). More
+ // precisely, we don't want Value return values compiled as out params.
+ private:
+#endif
+
+ jsval_layout data;
+
+ private:
+ void staticAssertions() {
+ JS_STATIC_ASSERT(sizeof(JSValueType) == 1);
+ JS_STATIC_ASSERT(sizeof(JSValueTag) == 4);
+ JS_STATIC_ASSERT(sizeof(JSBool) == 4);
+ JS_STATIC_ASSERT(sizeof(JSWhyMagic) <= 4);
+ JS_STATIC_ASSERT(sizeof(Value) == 8);
+ }
+
+ friend jsval_layout (::JSVAL_TO_IMPL)(Value);
+ friend Value (::IMPL_TO_JSVAL)(jsval_layout l);
+};
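+
+/*
+ * Illustrative usage sketch of the members above (maybeObj is a
+ * hypothetical, possibly-null JSObject *; UseObject is hypothetical):
+ *
+ *   JS::Value v;
+ *   v.setInt32(42);
+ *   MOZ_ASSERT(v.isInt32() && v.toInt32() == 42);
+ *   v.setObjectOrNull(maybeObj);
+ *   if (v.isObject())
+ *       UseObject(v.toObject());           // non-null by construction
+ */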
+
+inline bool
+IsPoisonedValue(const Value &v)
+{
+ if (v.isString())
+ return IsPoisonedPtr(v.toString());
+ if (v.isObject())
+ return IsPoisonedPtr(&v.toObject());
+ return false;
+}
+
+/************************************************************************/
+
+static inline Value
+NullValue()
+{
+ Value v;
+ v.setNull();
+ return v;
+}
+
+static inline Value
+UndefinedValue()
+{
+ Value v;
+ v.setUndefined();
+ return v;
+}
+
+static inline Value
+Int32Value(int32_t i32)
+{
+ Value v;
+ v.setInt32(i32);
+ return v;
+}
+
+static inline Value
+DoubleValue(double dbl)
+{
+ Value v;
+ v.setDouble(dbl);
+ return v;
+}
+
+static inline Value
+StringValue(JSString *str)
+{
+ Value v;
+ v.setString(str);
+ return v;
+}
+
+static inline Value
+BooleanValue(bool boo)
+{
+ Value v;
+ v.setBoolean(boo);
+ return v;
+}
+
+static inline Value
+ObjectValue(JSObject &obj)
+{
+ Value v;
+ v.setObject(obj);
+ return v;
+}
+
+static inline Value
+ObjectValueCrashOnTouch()
+{
+ Value v;
+ v.setObject(*reinterpret_cast<JSObject *>(0x42));
+ return v;
+}
+
+static inline Value
+MagicValue(JSWhyMagic why)
+{
+ Value v;
+ v.setMagic(why);
+ return v;
+}
+
+static inline Value
+NumberValue(float f)
+{
+ Value v;
+ v.setNumber(f);
+ return v;
+}
+
+static inline Value
+NumberValue(double dbl)
+{
+ Value v;
+ v.setNumber(dbl);
+ return v;
+}
+
+static inline Value
+NumberValue(int8_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(uint8_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(int16_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(uint16_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(int32_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(uint32_t i)
+{
+ Value v;
+ v.setNumber(i);
+ return v;
+}
+
+namespace detail {
+
+template <bool Signed>
+class MakeNumberValue
+{
+ public:
+ template<typename T>
+ static inline Value create(const T t)
+ {
+ Value v;
+ if (JSVAL_INT_MIN <= t && t <= JSVAL_INT_MAX)
+ v.setInt32(int32_t(t));
+ else
+ v.setDouble(double(t));
+ return v;
+ }
+};
+
+template <>
+class MakeNumberValue<false>
+{
+ public:
+ template<typename T>
+ static inline Value create(const T t)
+ {
+ Value v;
+ if (t <= JSVAL_INT_MAX)
+ v.setInt32(int32_t(t));
+ else
+ v.setDouble(double(t));
+ return v;
+ }
+};
+
+} // namespace detail
+
+template <typename T>
+static inline Value
+NumberValue(const T t)
+{
+ MOZ_ASSERT(T(double(t)) == t, "value creation would be lossy");
+ return detail::MakeNumberValue<std::numeric_limits<T>::is_signed>::create(t);
+}
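+
+/*
+ * Illustrative sketch: NumberValue picks the most compact representation.
+ *
+ *   JS::Value a = JS::NumberValue(int16_t(7));        // fits: boxed as int32
+ *   JS::Value b = JS::NumberValue(uint32_t(1) << 31); // > JSVAL_INT_MAX: double
+ *   JS::Value c = JS::NumberValue(1.5);               // non-integral: double
+ */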
+
+static inline Value
+ObjectOrNullValue(JSObject *obj)
+{
+ Value v;
+ v.setObjectOrNull(obj);
+ return v;
+}
+
+static inline Value
+PrivateValue(void *ptr)
+{
+ Value v;
+ v.setPrivate(ptr);
+ return v;
+}
+
+static inline Value
+PrivateUint32Value(uint32_t ui)
+{
+ Value v;
+ v.setPrivateUint32(ui);
+ return v;
+}
+
+inline bool
+SameType(const Value &lhs, const Value &rhs)
+{
+ return JSVAL_SAME_TYPE_IMPL(lhs.data, rhs.data);
+}
+
+} // namespace JS
+
+/************************************************************************/
+
+#ifdef JSGC_GENERATIONAL
+namespace JS {
+JS_PUBLIC_API(void) HeapValuePostBarrier(Value *valuep);
+JS_PUBLIC_API(void) HeapValueRelocate(Value *valuep);
+}
+#endif
+
+namespace js {
+
+template <> struct GCMethods<const JS::Value>
+{
+ static JS::Value initial() { return JS::UndefinedValue(); }
+ static ThingRootKind kind() { return THING_ROOT_VALUE; }
+ static bool poisoned(const JS::Value &v) { return JS::IsPoisonedValue(v); }
+};
+
+template <> struct GCMethods<JS::Value>
+{
+ static JS::Value initial() { return JS::UndefinedValue(); }
+ static ThingRootKind kind() { return THING_ROOT_VALUE; }
+ static bool poisoned(const JS::Value &v) { return JS::IsPoisonedValue(v); }
+ static bool needsPostBarrier(const JS::Value &v) { return v.isMarkable(); }
+#ifdef JSGC_GENERATIONAL
+ static void postBarrier(JS::Value *v) { JS::HeapValuePostBarrier(v); }
+ static void relocate(JS::Value *v) { JS::HeapValueRelocate(v); }
+#endif
+};
+
+template <class Outer> class UnbarrieredMutableValueOperations;
+template <class Outer> class MutableValueOperations;
+
+/*
+ * A class designed for CRTP use in implementing the non-mutating parts of the
+ * Value interface in Value-like classes. Outer must be a class inheriting
+ * ValueOperations<Outer> with a visible extract() method returning the
+ * const Value* abstracted by Outer.
+ */
+template <class Outer>
+class ValueOperations
+{
+ friend class UnbarrieredMutableValueOperations<Outer>;
+ friend class MutableValueOperations<Outer>;
+
+ const JS::Value * value() const { return static_cast<const Outer*>(this)->extract(); }
+
+ public:
+ bool isUndefined() const { return value()->isUndefined(); }
+ bool isNull() const { return value()->isNull(); }
+ bool isBoolean() const { return value()->isBoolean(); }
+ bool isTrue() const { return value()->isTrue(); }
+ bool isFalse() const { return value()->isFalse(); }
+ bool isNumber() const { return value()->isNumber(); }
+ bool isInt32() const { return value()->isInt32(); }
+ bool isDouble() const { return value()->isDouble(); }
+ bool isString() const { return value()->isString(); }
+ bool isObject() const { return value()->isObject(); }
+ bool isMagic() const { return value()->isMagic(); }
+ bool isMagic(JSWhyMagic why) const { return value()->isMagic(why); }
+ bool isMarkable() const { return value()->isMarkable(); }
+ bool isPrimitive() const { return value()->isPrimitive(); }
+ bool isGCThing() const { return value()->isGCThing(); }
+
+ bool isNullOrUndefined() const { return value()->isNullOrUndefined(); }
+ bool isObjectOrNull() const { return value()->isObjectOrNull(); }
+
+ bool toBoolean() const { return value()->toBoolean(); }
+ double toNumber() const { return value()->toNumber(); }
+ int32_t toInt32() const { return value()->toInt32(); }
+ double toDouble() const { return value()->toDouble(); }
+ JSString *toString() const { return value()->toString(); }
+ JSObject &toObject() const { return value()->toObject(); }
+ JSObject *toObjectOrNull() const { return value()->toObjectOrNull(); }
+ void *toGCThing() const { return value()->toGCThing(); }
+
+ JSValueType extractNonDoubleType() const { return value()->extractNonDoubleType(); }
+
+ JSWhyMagic whyMagic() const { return value()->whyMagic(); }
+};
+
+/*
+ * A class designed for CRTP use in implementing the mutating parts of the Value
+ * interface in Value-like classes that don't need post barriers. Outer must be
+ * a class inheriting UnbarrieredMutableValueOperations<Outer> with visible
+ * extractMutable() and extract() methods returning the const Value* and Value*
+ * abstracted by Outer.
+ */
+template <class Outer>
+class UnbarrieredMutableValueOperations : public ValueOperations<Outer>
+{
+ friend class MutableValueOperations<Outer>;
+ JS::Value * value() { return static_cast<Outer*>(this)->extractMutable(); }
+
+ public:
+ void setNull() { value()->setNull(); }
+ void setUndefined() { value()->setUndefined(); }
+ void setInt32(int32_t i) { value()->setInt32(i); }
+ void setDouble(double d) { value()->setDouble(d); }
+ void setBoolean(bool b) { value()->setBoolean(b); }
+ void setMagic(JSWhyMagic why) { value()->setMagic(why); }
+ bool setNumber(uint32_t ui) { return value()->setNumber(ui); }
+ bool setNumber(double d) { return value()->setNumber(d); }
+};
+
+/*
+ * A class designed for CRTP use in implementing all the mutating parts of the
+ * Value interface in Value-like classes. Outer must be a class inheriting
+ * MutableValueOperations<Outer> with visible extractMutable() and extract()
+ * methods returning the const Value* and Value* abstracted by Outer.
+ */
+template <class Outer>
+class MutableValueOperations : public UnbarrieredMutableValueOperations<Outer>
+{
+ public:
+ void setString(JSString *str) { this->value()->setString(str); }
+ void setString(const JS::Anchor<JSString *> &str) { this->value()->setString(str); }
+ void setObject(JSObject &obj) { this->value()->setObject(obj); }
+ void setObjectOrNull(JSObject *arg) { this->value()->setObjectOrNull(arg); }
+};
+
+/*
+ * Augment the generic Heap<T> interface when T = Value with
+ * type-querying, value-extracting, and mutating operations.
+ */
+template <>
+class HeapBase<JS::Value> : public UnbarrieredMutableValueOperations<JS::Heap<JS::Value> >
+{
+ typedef JS::Heap<JS::Value> Outer;
+
+ friend class ValueOperations<Outer>;
+ friend class UnbarrieredMutableValueOperations<Outer>;
+
+ const JS::Value * extract() const { return static_cast<const Outer*>(this)->address(); }
+ JS::Value * extractMutable() { return static_cast<Outer*>(this)->unsafeGet(); }
+
+ /*
+ * Setters that potentially change the value to a GC thing from a non-GC
+ * thing must call JS::Heap::set() to trigger the post barrier.
+ *
+ * Changing from a GC thing to a non-GC thing value will leave the heap
+     * value in the store buffer, but it will be ignored, so this is not a
+ * problem.
+ */
+ void setBarriered(const JS::Value &v) {
+ static_cast<JS::Heap<JS::Value> *>(this)->set(v);
+ }
+
+ public:
+ void setString(JSString *str) { setBarriered(JS::StringValue(str)); }
+ void setString(const JS::Anchor<JSString *> &str) { setBarriered(JS::StringValue(str.get())); }
+ void setObject(JSObject &obj) { setBarriered(JS::ObjectValue(obj)); }
+
+ void setObjectOrNull(JSObject *arg) {
+ if (arg)
+ setObject(*arg);
+ else
+ setNull();
+ }
+};
+
+/*
+ * Augment the generic Handle<T> interface when T = Value with type-querying
+ * and value-extracting operations.
+ */
+template <>
+class HandleBase<JS::Value> : public ValueOperations<JS::Handle<JS::Value> >
+{
+ friend class ValueOperations<JS::Handle<JS::Value> >;
+ const JS::Value * extract() const {
+ return static_cast<const JS::Handle<JS::Value>*>(this)->address();
+ }
+};
+
+/*
+ * Augment the generic MutableHandle<T> interface when T = Value with
+ * type-querying, value-extracting, and mutating operations.
+ */
+template <>
+class MutableHandleBase<JS::Value> : public MutableValueOperations<JS::MutableHandle<JS::Value> >
+{
+ friend class ValueOperations<JS::MutableHandle<JS::Value> >;
+ const JS::Value * extract() const {
+ return static_cast<const JS::MutableHandle<JS::Value>*>(this)->address();
+ }
+
+ friend class UnbarrieredMutableValueOperations<JS::MutableHandle<JS::Value> >;
+ friend class MutableValueOperations<JS::MutableHandle<JS::Value> >;
+ JS::Value * extractMutable() {
+ return static_cast<JS::MutableHandle<JS::Value>*>(this)->address();
+ }
+};
+
+/*
+ * Augment the generic Rooted<T> interface when T = Value with type-querying,
+ * value-extracting, and mutating operations.
+ */
+template <>
+class RootedBase<JS::Value> : public MutableValueOperations<JS::Rooted<JS::Value> >
+{
+ friend class ValueOperations<JS::Rooted<JS::Value> >;
+ const JS::Value * extract() const {
+ return static_cast<const JS::Rooted<JS::Value>*>(this)->address();
+ }
+
+ friend class UnbarrieredMutableValueOperations<JS::Rooted<JS::Value> >;
+ friend class MutableValueOperations<JS::Rooted<JS::Value> >;
+ JS::Value * extractMutable() {
+ return static_cast<JS::Rooted<JS::Value>*>(this)->address();
+ }
+};
+
+} // namespace js
+
+inline jsval_layout
+JSVAL_TO_IMPL(JS::Value v)
+{
+ return v.data;
+}
+
+inline JS::Value
+IMPL_TO_JSVAL(jsval_layout l)
+{
+ JS::Value v;
+ v.data = l;
+ return v;
+}
+
+namespace JS {
+
+#ifndef __GNUC__
+/*
+ * The default assignment operator for |struct C| has the signature:
+ *
+ * C& C::operator=(const C&)
+ *
+ * And in particular requires implicit conversion of |this| to type |C| for the
+ * return value. But |volatile C| cannot thus be converted to |C|, so just
+ * doing |sink = hold| as in the non-specialized version would fail to compile.
+ * Do the assignment on asBits instead, since I don't think we want to give
+ * jsval_layout an assignment operator returning |volatile jsval_layout|.
+ */
+template<>
+inline Anchor<Value>::~Anchor()
+{
+ volatile uint64_t bits;
+ bits = JSVAL_TO_IMPL(hold).asBits;
+}
+#endif
+
+#ifdef JS_DEBUG
+namespace detail {
+
+struct ValueAlignmentTester { char c; JS::Value v; };
+MOZ_STATIC_ASSERT(sizeof(ValueAlignmentTester) == 16,
+ "JS::Value must be 16-byte-aligned");
+
+struct LayoutAlignmentTester { char c; jsval_layout l; };
+MOZ_STATIC_ASSERT(sizeof(LayoutAlignmentTester) == 16,
+ "jsval_layout must be 16-byte-aligned");
+
+} // namespace detail
+#endif /* JS_DEBUG */
+
+} // namespace JS
+
+/*
+ * JS::Value and jsval are the same type; jsval is the old name, kept around
+ * for backwards compatibility along with all the JSVAL_* operations below.
+ * jsval_layout is an implementation detail and should not be used externally.
+ */
+typedef JS::Value jsval;
+
+MOZ_STATIC_ASSERT(sizeof(jsval_layout) == sizeof(JS::Value),
+ "jsval_layout and JS::Value must have identical layouts");
+
+/************************************************************************/
+
+static inline JSBool
+JSVAL_IS_NULL(jsval v)
+{
+ return JSVAL_IS_NULL_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline JSBool
+JSVAL_IS_VOID(jsval v)
+{
+ return JSVAL_IS_UNDEFINED_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline JSBool
+JSVAL_IS_INT(jsval v)
+{
+ return JSVAL_IS_INT32_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline int32_t
+JSVAL_TO_INT(jsval v)
+{
+ MOZ_ASSERT(JSVAL_IS_INT(v));
+ return JSVAL_TO_INT32_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline jsval
+INT_TO_JSVAL(int32_t i)
+{
+ return IMPL_TO_JSVAL(INT32_TO_JSVAL_IMPL(i));
+}
+
+static inline JSBool
+JSVAL_IS_DOUBLE(jsval v)
+{
+ return JSVAL_IS_DOUBLE_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline double
+JSVAL_TO_DOUBLE(jsval v)
+{
+ jsval_layout l;
+ MOZ_ASSERT(JSVAL_IS_DOUBLE(v));
+ l = JSVAL_TO_IMPL(v);
+ return l.asDouble;
+}
+
+static inline jsval
+DOUBLE_TO_JSVAL(double d)
+{
+ /*
+ * This is a manually inlined version of:
+ * d = JS_CANONICALIZE_NAN(d);
+ * return IMPL_TO_JSVAL(DOUBLE_TO_JSVAL_IMPL(d));
+ * because GCC from XCode 3.1.4 miscompiles the above code.
+ */
+ jsval_layout l;
+ if (MOZ_UNLIKELY(d != d))
+ l.asBits = 0x7FF8000000000000LL;
+ else
+ l.asDouble = d;
+ return IMPL_TO_JSVAL(l);
+}
+
+static inline jsval
+UINT_TO_JSVAL(uint32_t i)
+{
+ if (i <= JSVAL_INT_MAX)
+ return INT_TO_JSVAL((int32_t)i);
+ return DOUBLE_TO_JSVAL((double)i);
+}
+
+static inline JSBool
+JSVAL_IS_NUMBER(jsval v)
+{
+ return JSVAL_IS_NUMBER_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline JSBool
+JSVAL_IS_STRING(jsval v)
+{
+ return JSVAL_IS_STRING_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline JSString *
+JSVAL_TO_STRING(jsval v)
+{
+ MOZ_ASSERT(JSVAL_IS_STRING(v));
+ return JSVAL_TO_STRING_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline jsval
+STRING_TO_JSVAL(JSString *str)
+{
+ return IMPL_TO_JSVAL(STRING_TO_JSVAL_IMPL(str));
+}
+
+static inline JSObject *
+JSVAL_TO_OBJECT(jsval v)
+{
+ MOZ_ASSERT(JSVAL_IS_OBJECT_OR_NULL_IMPL(JSVAL_TO_IMPL(v)));
+ return JSVAL_TO_OBJECT_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline jsval
+OBJECT_TO_JSVAL(JSObject *obj)
+{
+ if (obj)
+ return IMPL_TO_JSVAL(OBJECT_TO_JSVAL_IMPL(obj));
+ return IMPL_TO_JSVAL(BUILD_JSVAL(JSVAL_TAG_NULL, 0));
+}
+
+static inline JSBool
+JSVAL_IS_BOOLEAN(jsval v)
+{
+ return JSVAL_IS_BOOLEAN_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline JSBool
+JSVAL_TO_BOOLEAN(jsval v)
+{
+ MOZ_ASSERT(JSVAL_IS_BOOLEAN(v));
+ return JSVAL_TO_BOOLEAN_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline jsval
+BOOLEAN_TO_JSVAL(JSBool b)
+{
+ return IMPL_TO_JSVAL(BOOLEAN_TO_JSVAL_IMPL(b));
+}
+
+static inline JSBool
+JSVAL_IS_PRIMITIVE(jsval v)
+{
+ return JSVAL_IS_PRIMITIVE_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline JSBool
+JSVAL_IS_GCTHING(jsval v)
+{
+ return JSVAL_IS_GCTHING_IMPL(JSVAL_TO_IMPL(v));
+}
+
+static inline void *
+JSVAL_TO_GCTHING(jsval v)
+{
+ MOZ_ASSERT(JSVAL_IS_GCTHING(v));
+ return JSVAL_TO_GCTHING_IMPL(JSVAL_TO_IMPL(v));
+}
+
+/* To be GC-safe, privates are tagged as doubles. */
+
+static inline jsval
+PRIVATE_TO_JSVAL(void *ptr)
+{
+ return IMPL_TO_JSVAL(PRIVATE_PTR_TO_JSVAL_IMPL(ptr));
+}
+
+static inline void *
+JSVAL_TO_PRIVATE(jsval v)
+{
+ MOZ_ASSERT(JSVAL_IS_DOUBLE(v));
+ return JSVAL_TO_PRIVATE_PTR_IMPL(JSVAL_TO_IMPL(v));
+}
+
+#endif /* js_Value_h */
diff --git a/js/public/Vector.h b/js/public/Vector.h
new file mode 100644
index 0000000..7e0bd76
--- /dev/null
+++ b/js/public/Vector.h
@@ -0,0 +1,1107 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Vector_h
+#define js_Vector_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/TypeTraits.h"
+
+#include "TemplateLib.h"
+#include "Utility.h"
+
+/* Silence dire "bugs in previous versions of MSVC have been fixed" warnings */
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4345)
+#endif
+
+namespace js {
+
+class TempAllocPolicy;
+
+template <class T,
+ size_t MinInlineCapacity = 0,
+ class AllocPolicy = TempAllocPolicy>
+class Vector;
+
+/*
+ * Check whether the given capacity wastes space when allocated on the heap:
+ * this returns true when cap*sizeof(T) falls short of the next power of two
+ * by at least one element's worth of bytes. growStorageBy() is responsible
+ * for choosing capacities that avoid this.
+ */
+template <typename T>
+static bool CapacityHasExcessSpace(size_t cap)
+{
+ size_t size = cap * sizeof(T);
+ return RoundUpPow2(size) - size >= sizeof(T);
+}
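+
+/*
+ * Example (illustrative): for sizeof(T) == 8, cap == 6 gives 48 bytes;
+ * RoundUpPow2(48) == 64 leaves 16 (>= 8) wasted bytes, so 6 has excess
+ * space, whereas cap == 8 fills 64 bytes exactly and does not.
+ */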
+
+/*
+ * This template class provides a default implementation for vector operations
+ * when the element type is not known to be a POD, as judged by IsPod.
+ */
+template <class T, size_t N, class AP, bool IsPod>
+struct VectorImpl
+{
+ /* Destroys constructed objects in the range [begin, end). */
+ static inline void destroy(T *begin, T *end) {
+ for (T *p = begin; p != end; ++p)
+ p->~T();
+ }
+
+ /* Constructs objects in the uninitialized range [begin, end). */
+ static inline void initialize(T *begin, T *end) {
+ for (T *p = begin; p != end; ++p)
+ new(p) T();
+ }
+
+ /*
+ * Copy-constructs objects in the uninitialized range
+ * [dst, dst+(srcend-srcbeg)) from the range [srcbeg, srcend).
+ */
+ template <class U>
+ static inline void copyConstruct(T *dst, const U *srcbeg, const U *srcend) {
+ for (const U *p = srcbeg; p != srcend; ++p, ++dst)
+ new(dst) T(*p);
+ }
+
+ /*
+ * Move-constructs objects in the uninitialized range
+ * [dst, dst+(srcend-srcbeg)) from the range [srcbeg, srcend).
+ */
+ template <class U>
+ static inline void moveConstruct(T *dst, const U *srcbeg, const U *srcend) {
+ for (const U *p = srcbeg; p != srcend; ++p, ++dst)
+ new(dst) T(Move(*p));
+ }
+
+ /*
+ * Copy-constructs objects in the uninitialized range [dst, dst+n) from the
+ * same object u.
+ */
+ template <class U>
+ static inline void copyConstructN(T *dst, size_t n, const U &u) {
+ for (T *end = dst + n; dst != end; ++dst)
+ new(dst) T(u);
+ }
+
+ /*
+ * Grows the given buffer to have capacity newCap, preserving the objects
+ * constructed in the range [begin, end) and updating v. Assumes that (1)
+ * newCap has not overflowed, and (2) multiplying newCap by sizeof(T) will
+ * not overflow.
+ */
+ static inline bool growTo(Vector<T,N,AP> &v, size_t newCap) {
+ JS_ASSERT(!v.usingInlineStorage());
+ JS_ASSERT(!CapacityHasExcessSpace<T>(newCap));
+ T *newbuf = reinterpret_cast<T *>(v.malloc_(newCap * sizeof(T)));
+ if (!newbuf)
+ return false;
+ for (T *dst = newbuf, *src = v.beginNoCheck(); src != v.endNoCheck(); ++dst, ++src)
+ new(dst) T(Move(*src));
+ VectorImpl::destroy(v.beginNoCheck(), v.endNoCheck());
+ v.free_(v.mBegin);
+ v.mBegin = newbuf;
+ /* v.mLength is unchanged. */
+ v.mCapacity = newCap;
+ return true;
+ }
+};
+
+/*
+ * This partial template specialization provides a default implementation for
+ * vector operations when the element type is known to be a POD, as judged by
+ * IsPod.
+ */
+template <class T, size_t N, class AP>
+struct VectorImpl<T, N, AP, true>
+{
+ static inline void destroy(T *, T *) {}
+
+ static inline void initialize(T *begin, T *end) {
+ /*
+ * You would think that memset would be a big win (or even break even)
+ * when we know T is a POD. But currently it's not. This is probably
+ * because |append| tends to be given small ranges and memset requires
+ * a function call that doesn't get inlined.
+ *
+ * memset(begin, 0, sizeof(T) * (end-begin));
+ */
+ for (T *p = begin; p != end; ++p)
+ new(p) T();
+ }
+
+ template <class U>
+ static inline void copyConstruct(T *dst, const U *srcbeg, const U *srcend) {
+ /*
+ * See above memset comment. Also, notice that copyConstruct is
+ * currently templated (T != U), so memcpy won't work without
+ * requiring T == U.
+ *
+ * memcpy(dst, srcbeg, sizeof(T) * (srcend - srcbeg));
+ */
+ for (const U *p = srcbeg; p != srcend; ++p, ++dst)
+ *dst = *p;
+ }
+
+ template <class U>
+ static inline void moveConstruct(T *dst, const U *srcbeg, const U *srcend) {
+ copyConstruct(dst, srcbeg, srcend);
+ }
+
+ static inline void copyConstructN(T *dst, size_t n, const T &t) {
+ for (T *p = dst, *end = dst + n; p != end; ++p)
+ *p = t;
+ }
+
+ static inline bool growTo(Vector<T,N,AP> &v, size_t newCap) {
+ JS_ASSERT(!v.usingInlineStorage());
+ JS_ASSERT(!CapacityHasExcessSpace<T>(newCap));
+ size_t oldSize = sizeof(T) * v.mCapacity;
+ size_t newSize = sizeof(T) * newCap;
+ T *newbuf = reinterpret_cast<T *>(v.realloc_(v.mBegin, oldSize, newSize));
+ if (!newbuf)
+ return false;
+ v.mBegin = newbuf;
+ /* v.mLength is unchanged. */
+ v.mCapacity = newCap;
+ return true;
+ }
+};
+
+/*
+ * JS-friendly, STL-like container providing a short-lived, dynamic buffer.
+ * Vector calls the constructors/destructors of all elements stored in
+ * its internal buffer, so non-PODs may be safely used. Additionally,
+ * Vector will store the first N elements in-place before resorting to
+ * dynamic allocation.
+ *
+ * T requirements:
+ * - default and copy constructible, assignable, destructible
+ * - operations do not throw
+ * N requirements:
+ * - any value; however, N is clamped internally to sane min/max limits
+ * AllocPolicy:
+ * - see "Allocation policies" in jsalloc.h (default js::TempAllocPolicy)
+ *
+ * N.B: Vector is not reentrant: T member functions called during Vector member
+ * functions must not call back into the same object.
+ */
+template <class T, size_t N, class AllocPolicy>
+class Vector : private AllocPolicy
+{
+ // typedef typename tl::StaticAssert<!tl::IsPostBarrieredType<T>::result>::result _;
+
+ /* utilities */
+
+ static const bool sElemIsPod = mozilla::IsPod<T>::value;
+ typedef VectorImpl<T, N, AllocPolicy, sElemIsPod> Impl;
+ friend struct VectorImpl<T, N, AllocPolicy, sElemIsPod>;
+
+ bool growStorageBy(size_t incr);
+ bool convertToHeapStorage(size_t newCap);
+
+ template <bool InitNewElems> inline bool growByImpl(size_t inc);
+
+ /* magic constants */
+
+ static const int sMaxInlineBytes = 1024;
+
+ /* compute constants */
+
+ /*
+ * Consider element size to be 1 for buffer sizing if there are
+ * 0 inline elements. This allows us to compile when the definition
+ * of the element type is not visible here.
+ *
+ * Explicit specialization is only allowed at namespace scope, so
+ * in order to keep everything here, we use a dummy template
+ * parameter with partial specialization.
+ */
+ template <int M, int Dummy>
+ struct ElemSize {
+ static const size_t result = sizeof(T);
+ };
+ template <int Dummy>
+ struct ElemSize<0, Dummy> {
+ static const size_t result = 1;
+ };
+
+ static const size_t sInlineCapacity =
+ tl::Min<N, sMaxInlineBytes / ElemSize<N, 0>::result>::result;
+
+ /* Calculate inline buffer size; avoid 0-sized array. */
+ static const size_t sInlineBytes =
+ tl::Max<1, sInlineCapacity * ElemSize<N, 0>::result>::result;
+
+ /* member data */
+
+ /*
+ * Pointer to the buffer, be it inline or heap-allocated. Only [mBegin,
+ * mBegin + mLength) hold valid constructed T objects. The range [mBegin +
+ * mLength, mBegin + mCapacity) holds uninitialized memory. The range
+ * [mBegin + mLength, mBegin + mReserved) also holds uninitialized memory
+ * previously allocated by a call to reserve().
+ */
+ T *mBegin;
+ size_t mLength; /* Number of elements in the Vector. */
+ size_t mCapacity; /* Max number of elements storable in the Vector without resizing. */
+#ifdef JS_DEBUG
+    size_t mReserved;   /* Maximum of reserved and used elements in this vector. */
+#endif
+
+ mozilla::AlignedStorage<sInlineBytes> storage;
+
+#ifdef JS_DEBUG
+ friend class ReentrancyGuard;
+ bool entered;
+#endif
+
+ Vector(const Vector &) MOZ_DELETE;
+ Vector &operator=(const Vector &) MOZ_DELETE;
+
+ /* private accessors */
+
+ bool usingInlineStorage() const {
+ return mBegin == inlineStorage();
+ }
+
+ T *inlineStorage() const {
+ return (T *)storage.addr();
+ }
+
+ T *beginNoCheck() const {
+ return mBegin;
+ }
+
+ T *endNoCheck() {
+ return mBegin + mLength;
+ }
+
+ const T *endNoCheck() const {
+ return mBegin + mLength;
+ }
+
+#ifdef JS_DEBUG
+ size_t reserved() const {
+ JS_ASSERT(mReserved <= mCapacity);
+ JS_ASSERT(mLength <= mReserved);
+ return mReserved;
+ }
+#endif
+
+ /* Append operations guaranteed to succeed due to pre-reserved space. */
+ template <class U> void internalAppend(U u);
+ void internalAppendN(const T &t, size_t n);
+ template <class U> void internalAppend(const U *begin, size_t length);
+ template <class U, size_t O, class BP> void internalAppend(const Vector<U,O,BP> &other);
+
+ public:
+ static const size_t sMaxInlineStorage = N;
+
+ typedef T ElementType;
+
+ Vector(AllocPolicy = AllocPolicy());
+ Vector(MoveRef<Vector>); /* Move constructor. */
+ Vector &operator=(MoveRef<Vector>); /* Move assignment. */
+ ~Vector();
+
+ /* accessors */
+
+ const AllocPolicy &allocPolicy() const {
+ return *this;
+ }
+
+ AllocPolicy &allocPolicy() {
+ return *this;
+ }
+
+ enum { InlineLength = N };
+
+ size_t length() const {
+ return mLength;
+ }
+
+ bool empty() const {
+ return mLength == 0;
+ }
+
+ size_t capacity() const {
+ return mCapacity;
+ }
+
+ T *begin() {
+ JS_ASSERT(!entered);
+ return mBegin;
+ }
+
+ const T *begin() const {
+ JS_ASSERT(!entered);
+ return mBegin;
+ }
+
+ T *end() {
+ JS_ASSERT(!entered);
+ return mBegin + mLength;
+ }
+
+ const T *end() const {
+ JS_ASSERT(!entered);
+ return mBegin + mLength;
+ }
+
+ T &operator[](size_t i) {
+ JS_ASSERT(!entered && i < mLength);
+ return begin()[i];
+ }
+
+ const T &operator[](size_t i) const {
+ JS_ASSERT(!entered && i < mLength);
+ return begin()[i];
+ }
+
+ T &back() {
+ JS_ASSERT(!entered && !empty());
+ return *(end() - 1);
+ }
+
+ const T &back() const {
+ JS_ASSERT(!entered && !empty());
+ return *(end() - 1);
+ }
+
+ class Range {
+ friend class Vector;
+ T *cur_, *end_;
+ Range(T *cur, T *end) : cur_(cur), end_(end) {}
+ public:
+ Range() {}
+ bool empty() const { return cur_ == end_; }
+ size_t remain() const { return end_ - cur_; }
+ T &front() const { return *cur_; }
+ void popFront() { JS_ASSERT(!empty()); ++cur_; }
+ T popCopyFront() { JS_ASSERT(!empty()); return *cur_++; }
+ };
+
+ Range all() {
+ return Range(begin(), end());
+ }
+
+ /* mutators */
+
+ /* Given that the Vector is empty and has no inline storage, grow to |capacity|. */
+ bool initCapacity(size_t request);
+
+    /* If reserve(length() + N) succeeds, the next N appends are guaranteed to succeed. */
+ bool reserve(size_t request);
+
+ /*
+ * Destroy elements in the range [end() - incr, end()). Does not deallocate
+ * or unreserve storage for those elements.
+ */
+ void shrinkBy(size_t incr);
+
+ /* Grow the vector by incr elements. */
+ bool growBy(size_t incr);
+
+ /* Call shrinkBy or growBy based on whether newSize > length(). */
+ bool resize(size_t newLength);
+
+ /* Leave new elements as uninitialized memory. */
+ bool growByUninitialized(size_t incr);
+ bool resizeUninitialized(size_t newLength);
+
+ /* Shorthand for shrinkBy(length()). */
+ void clear();
+
+ /* Clears and releases any heap-allocated storage. */
+ void clearAndFree();
+
+ /* If true, appending |needed| elements will not call realloc(). */
+ bool canAppendWithoutRealloc(size_t needed) const;
+
+ /*
+ * Potentially fallible append operations.
+ *
+ * The function templates that take an unspecified type U require a
+ * const T & or a MoveRef<T>. The MoveRef<T> variants move their
+ * operands into the vector, instead of copying them. If they fail, the
+ * operand is left unmoved.
+ */
+ template <class U> bool append(U t);
+ bool appendN(const T &t, size_t n);
+ template <class U> bool append(const U *begin, const U *end);
+ template <class U> bool append(const U *begin, size_t length);
+ template <class U, size_t O, class BP> bool append(const Vector<U,O,BP> &other);
+
+ /*
+ * Guaranteed-infallible append operations for use upon vectors whose
+ * memory has been pre-reserved.
+ */
+ template <class U> void infallibleAppend(const U &u) {
+ internalAppend(u);
+ }
+ void infallibleAppendN(const T &t, size_t n) {
+ internalAppendN(t, n);
+ }
+ template <class U> void infallibleAppend(const U *aBegin, const U *aEnd) {
+ internalAppend(aBegin, mozilla::PointerRangeSize(aBegin, aEnd));
+ }
+ template <class U> void infallibleAppend(const U *aBegin, size_t aLength) {
+ internalAppend(aBegin, aLength);
+ }
+ template <class U, size_t O, class BP> void infallibleAppend(const Vector<U,O,BP> &other) {
+ internalAppend(other);
+ }
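+
+    /*
+     * Illustrative pattern: reserve up front, then append infallibly.
+     *
+     *   if (!v.reserve(v.length() + n))
+     *       return false;                    // OOM
+     *   for (size_t i = 0; i < n; i++)
+     *       v.infallibleAppend(items[i]);    // cannot fail: space reserved
+     */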
+
+ void popBack();
+
+ T popCopy();
+
+ /*
+ * Transfers ownership of the internal buffer used by Vector to the caller.
+ * After this call, the Vector is empty. Since the returned buffer may need
+ * to be allocated (if the elements are currently stored in-place), the
+ * call can fail, returning NULL.
+ *
+ * N.B. Although a T*, only the range [0, length()) is constructed.
+ */
+ T *extractRawBuffer();
+
+ /*
+ * Transfer ownership of an array of objects into the Vector.
+ * N.B. This call assumes that there are no uninitialized elements in the
+ * passed array.
+ */
+ void replaceRawBuffer(T *p, size_t length);
+
+ /*
+ * Places |val| at position |p|, shifting existing elements from |p|
+ * onward one position higher. On success, |p| should not be reused
+ * because it will be a dangling pointer if reallocation of the vector
+ * storage occurred; the return value should be used instead. On failure,
+ * NULL is returned.
+ *
+ * Example usage:
+ *
+ * if (!(p = vec.insert(p, val)))
+ * <handle failure>
+ * <keep working with p>
+ */
+ T *insert(T *p, const T &val);
+
+ /*
+ * Removes the element |t|, which must fall in the bounds [begin, end),
+ * shifting existing elements from |t + 1| onward one position lower.
+ */
+ void erase(T *t);
+
+ /*
+ * Measure the size of the Vector's heap-allocated storage.
+ */
+ size_t sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf) const;
+
+ /*
+ * Like sizeOfExcludingThis, but also measures the size of the Vector
+ * object (which must be heap-allocated) itself.
+ */
+ size_t sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const;
+
+ void swap(Vector &other);
+};
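+
+/*
+ * Illustrative usage sketch (assumes a JSContext *cx for the default
+ * js::TempAllocPolicy; process() is hypothetical):
+ *
+ *   js::Vector<int, 8> vec(cx);    // first 8 elements stored inline
+ *   if (!vec.append(1) || !vec.appendN(0, 3))
+ *       return false;              // propagate OOM
+ *   for (int *p = vec.begin(); p != vec.end(); ++p)
+ *       process(*p);
+ */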
+
+/* This does the re-entrancy check plus several other sanity checks. */
+#define REENTRANCY_GUARD_ET_AL \
+ ReentrancyGuard g(*this); \
+ JS_ASSERT_IF(usingInlineStorage(), mCapacity == sInlineCapacity); \
+ JS_ASSERT(reserved() <= mCapacity); \
+ JS_ASSERT(mLength <= reserved()); \
+ JS_ASSERT(mLength <= mCapacity)
+
+/* Vector Implementation */
+
+template <class T, size_t N, class AllocPolicy>
+JS_ALWAYS_INLINE
+Vector<T,N,AllocPolicy>::Vector(AllocPolicy ap)
+ : AllocPolicy(ap), mBegin((T *)storage.addr()), mLength(0),
+ mCapacity(sInlineCapacity)
+#ifdef JS_DEBUG
+ , mReserved(sInlineCapacity), entered(false)
+#endif
+{}
+
+/* Move constructor. */
+template <class T, size_t N, class AllocPolicy>
+JS_ALWAYS_INLINE
+Vector<T, N, AllocPolicy>::Vector(MoveRef<Vector> rhs)
+ : AllocPolicy(rhs)
+#ifdef JS_DEBUG
+ , entered(false)
+#endif
+{
+ mLength = rhs->mLength;
+ mCapacity = rhs->mCapacity;
+#ifdef JS_DEBUG
+ mReserved = rhs->mReserved;
+#endif
+
+ if (rhs->usingInlineStorage()) {
+ /* We can't move the buffer over in this case, so copy elements. */
+ mBegin = (T *)storage.addr();
+ Impl::moveConstruct(mBegin, rhs->beginNoCheck(), rhs->endNoCheck());
+ /*
+ * Leave rhs's mLength, mBegin, mCapacity, and mReserved as they are.
+ * The elements in its in-line storage still need to be destroyed.
+ */
+ } else {
+ /*
+ * Take src's buffer, and turn src into an empty vector using
+ * in-line storage.
+ */
+ mBegin = rhs->mBegin;
+ rhs->mBegin = (T *) rhs->storage.addr();
+ rhs->mCapacity = sInlineCapacity;
+ rhs->mLength = 0;
+#ifdef JS_DEBUG
+ rhs->mReserved = sInlineCapacity;
+#endif
+ }
+}
+
+/* Move assignment. */
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE
+Vector<T, N, AP> &
+Vector<T, N, AP>::operator=(MoveRef<Vector> rhs)
+{
+ this->~Vector();
+ new(this) Vector(rhs);
+ return *this;
+}
+
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE
+Vector<T,N,AP>::~Vector()
+{
+ REENTRANCY_GUARD_ET_AL;
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ if (!usingInlineStorage())
+ this->free_(beginNoCheck());
+}
+
+/*
+ * This function creates a new heap buffer with capacity newCap, moves all
+ * elements from the inline buffer into it, and returns false on OOM
+ * (in which case the vector is left untouched).
+ */
+template <class T, size_t N, class AP>
+inline bool
+Vector<T,N,AP>::convertToHeapStorage(size_t newCap)
+{
+ JS_ASSERT(usingInlineStorage());
+
+ /* Allocate buffer. */
+ JS_ASSERT(!CapacityHasExcessSpace<T>(newCap));
+ T *newBuf = reinterpret_cast<T *>(this->malloc_(newCap * sizeof(T)));
+ if (!newBuf)
+ return false;
+
+ /* Copy inline elements into heap buffer. */
+ Impl::moveConstruct(newBuf, beginNoCheck(), endNoCheck());
+ Impl::destroy(beginNoCheck(), endNoCheck());
+
+ /* Switch in heap buffer. */
+ mBegin = newBuf;
+ /* mLength is unchanged. */
+ mCapacity = newCap;
+ return true;
+}
+
+template <class T, size_t N, class AP>
+JS_NEVER_INLINE bool
+Vector<T,N,AP>::growStorageBy(size_t incr)
+{
+ JS_ASSERT(mLength + incr > mCapacity);
+ JS_ASSERT_IF(!usingInlineStorage(), !CapacityHasExcessSpace<T>(mCapacity));
+
+ /*
+     * When choosing a new capacity, its size should be as close to 2^N bytes
+ * as possible. 2^N-sized requests are best because they are unlikely to
+ * be rounded up by the allocator. Asking for a 2^N number of elements
+ * isn't as good, because if sizeof(T) is not a power-of-two that would
+ * result in a non-2^N request size.
+ */
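+
+    /*
+     * Worked example (illustrative): with sizeof(T) == 24 and mLength == 10,
+     * doubling would give newCap = 20, but 20 * 24 == 480 wastes 32 bytes
+     * against RoundUpPow2(480) == 512, so the single-element path below bumps
+     * newCap to 21 (21 * 24 == 504, within sizeof(T) of 512).
+     */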
+
+ size_t newCap;
+
+ if (incr == 1) {
+ if (usingInlineStorage()) {
+ /* This case occurs in ~70--80% of the calls to this function. */
+ size_t newSize = tl::RoundUpPow2<(sInlineCapacity + 1) * sizeof(T)>::result;
+ newCap = newSize / sizeof(T);
+ goto convert;
+ }
+
+ if (mLength == 0) {
+ /* This case occurs in ~0--10% of the calls to this function. */
+ newCap = 1;
+ goto grow;
+ }
+
+ /* This case occurs in ~15--20% of the calls to this function. */
+
+ /*
+ * Will mLength*4*sizeof(T) overflow? This condition limits a Vector
+ * to 1GB of memory on a 32-bit system, which is a reasonable limit.
+     * It also ensures that ((char *)end() - (char *)begin()) does not
+ * overflow ptrdiff_t (see Bug 510319).
+ */
+ if (mLength & tl::MulOverflowMask<4 * sizeof(T)>::result) {
+ this->reportAllocOverflow();
+ return false;
+ }
+
+ /*
+ * If we reach here, the existing capacity will have a size that is
+ * already as close to 2^N as sizeof(T) will allow. Just double the
+ * capacity, and then there might be space for one more element.
+ */
+ newCap = mLength * 2;
+ if (CapacityHasExcessSpace<T>(newCap))
+ newCap += 1;
+
+ } else {
+ /* This case occurs in ~2% of the calls to this function. */
+ size_t newMinCap = mLength + incr;
+
+ /* Did mLength+incr overflow? Will newCap*sizeof(T) overflow? */
+ if (newMinCap < mLength ||
+ newMinCap & tl::MulOverflowMask<2 * sizeof(T)>::result)
+ {
+ this->reportAllocOverflow();
+ return false;
+ }
+
+ size_t newMinSize = newMinCap * sizeof(T);
+ size_t newSize = RoundUpPow2(newMinSize);
+ newCap = newSize / sizeof(T);
+ }
+
+ if (usingInlineStorage()) {
+ convert:
+ return convertToHeapStorage(newCap);
+ }
+
+ grow:
+ return Impl::growTo(*this, newCap);
+}
+
+template <class T, size_t N, class AP>
+inline bool
+Vector<T,N,AP>::initCapacity(size_t request)
+{
+ JS_ASSERT(empty());
+ JS_ASSERT(usingInlineStorage());
+ if (request == 0)
+ return true;
+ T *newbuf = reinterpret_cast<T *>(this->malloc_(request * sizeof(T)));
+ if (!newbuf)
+ return false;
+ mBegin = newbuf;
+ mCapacity = request;
+#ifdef JS_DEBUG
+ mReserved = request;
+#endif
+ return true;
+}
+
+template <class T, size_t N, class AP>
+inline bool
+Vector<T,N,AP>::reserve(size_t request)
+{
+ REENTRANCY_GUARD_ET_AL;
+ if (request > mCapacity && !growStorageBy(request - mLength))
+ return false;
+
+#ifdef JS_DEBUG
+ if (request > mReserved)
+ mReserved = request;
+ JS_ASSERT(mLength <= mReserved);
+ JS_ASSERT(mReserved <= mCapacity);
+#endif
+ return true;
+}
+
+template <class T, size_t N, class AP>
+inline void
+Vector<T,N,AP>::shrinkBy(size_t incr)
+{
+ REENTRANCY_GUARD_ET_AL;
+ JS_ASSERT(incr <= mLength);
+ Impl::destroy(endNoCheck() - incr, endNoCheck());
+ mLength -= incr;
+}
+
+template <class T, size_t N, class AP>
+template <bool InitNewElems>
+JS_ALWAYS_INLINE bool
+Vector<T,N,AP>::growByImpl(size_t incr)
+{
+ REENTRANCY_GUARD_ET_AL;
+ if (incr > mCapacity - mLength && !growStorageBy(incr))
+ return false;
+
+ JS_ASSERT(mLength + incr <= mCapacity);
+ T *newend = endNoCheck() + incr;
+ if (InitNewElems)
+ Impl::initialize(endNoCheck(), newend);
+ mLength += incr;
+#ifdef JS_DEBUG
+ if (mLength > mReserved)
+ mReserved = mLength;
+#endif
+ return true;
+}
+
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE bool
+Vector<T,N,AP>::growBy(size_t incr)
+{
+ return growByImpl<true>(incr);
+}
+
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE bool
+Vector<T,N,AP>::growByUninitialized(size_t incr)
+{
+ return growByImpl<false>(incr);
+}
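+
+/*
+ * growByUninitialized() extends mLength without running T's constructor on
+ * the new slots (growByImpl<false> skips Impl::initialize), so the caller
+ * must store to every new element before it is read, e.g. when the new tail
+ * is about to be filled by a memcpy.
+ */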
+
+template <class T, size_t N, class AP>
+STATIC_POSTCONDITION(!return || ubound(this->begin()) >= newLength)
+inline bool
+Vector<T,N,AP>::resize(size_t newLength)
+{
+ size_t curLength = mLength;
+ if (newLength > curLength)
+ return growBy(newLength - curLength);
+ shrinkBy(curLength - newLength);
+ return true;
+}
+
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE bool
+Vector<T,N,AP>::resizeUninitialized(size_t newLength)
+{
+ size_t curLength = mLength;
+ if (newLength > curLength)
+ return growByUninitialized(newLength - curLength);
+ shrinkBy(curLength - newLength);
+ return true;
+}
+
+template <class T, size_t N, class AP>
+inline void
+Vector<T,N,AP>::clear()
+{
+ REENTRANCY_GUARD_ET_AL;
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ mLength = 0;
+}
+
+template <class T, size_t N, class AP>
+inline void
+Vector<T,N,AP>::clearAndFree()
+{
+ clear();
+
+ if (usingInlineStorage())
+ return;
+
+ this->free_(beginNoCheck());
+ mBegin = (T *)storage.addr();
+ mCapacity = sInlineCapacity;
+#ifdef JS_DEBUG
+ mReserved = sInlineCapacity;
+#endif
+}
+
+template <class T, size_t N, class AP>
+inline bool
+Vector<T,N,AP>::canAppendWithoutRealloc(size_t needed) const
+{
+ return mLength + needed <= mCapacity;
+}
+
+template <class T, size_t N, class AP>
+template <class U>
+JS_ALWAYS_INLINE bool
+Vector<T,N,AP>::append(U t)
+{
+ REENTRANCY_GUARD_ET_AL;
+ if (mLength == mCapacity && !growStorageBy(1))
+ return false;
+
+#ifdef JS_DEBUG
+ if (mLength + 1 > mReserved)
+ mReserved = mLength + 1;
+#endif
+ internalAppend(t);
+ return true;
+}
+
+template <class T, size_t N, class AP>
+template <class U>
+JS_ALWAYS_INLINE void
+Vector<T,N,AP>::internalAppend(U u)
+{
+ JS_ASSERT(mLength + 1 <= mReserved);
+ JS_ASSERT(mReserved <= mCapacity);
+ new(endNoCheck()) T(u);
+ ++mLength;
+}
+
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE bool
+Vector<T,N,AP>::appendN(const T &t, size_t needed)
+{
+ REENTRANCY_GUARD_ET_AL;
+ if (mLength + needed > mCapacity && !growStorageBy(needed))
+ return false;
+
+#ifdef JS_DEBUG
+ if (mLength + needed > mReserved)
+ mReserved = mLength + needed;
+#endif
+ internalAppendN(t, needed);
+ return true;
+}
+
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE void
+Vector<T,N,AP>::internalAppendN(const T &t, size_t needed)
+{
+ JS_ASSERT(mLength + needed <= mReserved);
+ JS_ASSERT(mReserved <= mCapacity);
+ Impl::copyConstructN(endNoCheck(), needed, t);
+ mLength += needed;
+}
+
+template <class T, size_t N, class AP>
+inline T *
+Vector<T,N,AP>::insert(T *p, const T &val)
+{
+ JS_ASSERT(begin() <= p && p <= end());
+ size_t pos = p - begin();
+ JS_ASSERT(pos <= mLength);
+ size_t oldLength = mLength;
+ if (pos == oldLength) {
+ if (!append(val))
+ return NULL;
+ } else {
+ T oldBack = back();
+ if (!append(oldBack)) /* Dup the last element. */
+ return NULL;
+ for (size_t i = oldLength; i > pos; --i)
+ (*this)[i] = (*this)[i - 1];
+ (*this)[pos] = val;
+ }
+ return begin() + pos;
+}
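+
+/*
+ * Illustrative use of insert() (example values assumed, not from the
+ * original header):
+ *
+ *   js::Vector<int, 0, js::SystemAllocPolicy> v;
+ *   if (!v.append(1) || !v.append(2) || !v.append(4))
+ *       return false;
+ *   if (!v.insert(v.begin() + 2, 3))   // v becomes {1, 2, 3, 4}
+ *       return false;                  // NULL means OOM
+ *
+ * The internal append may reallocate, so |p| must not be dereferenced after
+ * the call; the result is recomputed as begin() + pos instead.
+ */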
+
+template<typename T, size_t N, class AP>
+inline void
+Vector<T,N,AP>::erase(T *it)
+{
+ JS_ASSERT(begin() <= it && it < end());
+ while (it + 1 != end()) {
+ *it = *(it + 1);
+ ++it;
+ }
+ popBack();
+}
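+
+/*
+ * erase() shifts each element after |it| down one slot with T's assignment
+ * operator and then pops the last element, so its cost is linear in
+ * end() - it.
+ */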
+
+template <class T, size_t N, class AP>
+template <class U>
+JS_ALWAYS_INLINE bool
+Vector<T,N,AP>::append(const U *insBegin, const U *insEnd)
+{
+ REENTRANCY_GUARD_ET_AL;
+ size_t needed = mozilla::PointerRangeSize(insBegin, insEnd);
+ if (mLength + needed > mCapacity && !growStorageBy(needed))
+ return false;
+
+#ifdef JS_DEBUG
+ if (mLength + needed > mReserved)
+ mReserved = mLength + needed;
+#endif
+ internalAppend(insBegin, needed);
+ return true;
+}
+
+template <class T, size_t N, class AP>
+template <class U>
+JS_ALWAYS_INLINE void
+Vector<T,N,AP>::internalAppend(const U *insBegin, size_t insLength)
+{
+ JS_ASSERT(mLength + insLength <= mReserved);
+ JS_ASSERT(mReserved <= mCapacity);
+ Impl::copyConstruct(endNoCheck(), insBegin, insBegin + insLength);
+ mLength += insLength;
+}
+
+template <class T, size_t N, class AP>
+template <class U, size_t O, class BP>
+inline bool
+Vector<T,N,AP>::append(const Vector<U,O,BP> &other)
+{
+ return append(other.begin(), other.end());
+}
+
+template <class T, size_t N, class AP>
+template <class U, size_t O, class BP>
+inline void
+Vector<T,N,AP>::internalAppend(const Vector<U,O,BP> &other)
+{
+ internalAppend(other.begin(), other.length());
+}
+
+template <class T, size_t N, class AP>
+template <class U>
+JS_ALWAYS_INLINE bool
+Vector<T,N,AP>::append(const U *insBegin, size_t insLength)
+{
+ return this->append(insBegin, insBegin + insLength);
+}
+
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE void
+Vector<T,N,AP>::popBack()
+{
+ REENTRANCY_GUARD_ET_AL;
+ JS_ASSERT(!empty());
+ --mLength;
+ endNoCheck()->~T();
+}
+
+template <class T, size_t N, class AP>
+JS_ALWAYS_INLINE T
+Vector<T,N,AP>::popCopy()
+{
+ T ret = back();
+ popBack();
+ return ret;
+}
+
+template <class T, size_t N, class AP>
+inline T *
+Vector<T,N,AP>::extractRawBuffer()
+{
+ T *ret;
+ if (usingInlineStorage()) {
+ ret = reinterpret_cast<T *>(this->malloc_(mLength * sizeof(T)));
+ if (!ret)
+ return NULL;
+ Impl::copyConstruct(ret, beginNoCheck(), endNoCheck());
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ /* mBegin, mCapacity are unchanged. */
+ mLength = 0;
+ } else {
+ ret = mBegin;
+ mBegin = (T *)storage.addr();
+ mLength = 0;
+ mCapacity = sInlineCapacity;
+#ifdef JS_DEBUG
+ mReserved = sInlineCapacity;
+#endif
+ }
+ return ret;
+}
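+
+/*
+ * Illustrative hand-off (assumed usage, not from the original header): the
+ * caller takes ownership of the returned buffer and must release it through
+ * the same allocation policy that the vector used.
+ *
+ *   js::Vector<char, 0, js::SystemAllocPolicy> v;
+ *   ...
+ *   size_t len = v.length();
+ *   char *buf = v.extractRawBuffer();  // NULL only if copying out of
+ *                                      // inline storage hits OOM
+ *   ...use buf[0..len-1]...;
+ *   js_free(buf);                      // SystemAllocPolicy frees via js_free
+ */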
+
+template <class T, size_t N, class AP>
+inline void
+Vector<T,N,AP>::replaceRawBuffer(T *p, size_t aLength)
+{
+ REENTRANCY_GUARD_ET_AL;
+
+ /* Destroy what we have. */
+ Impl::destroy(beginNoCheck(), endNoCheck());
+ if (!usingInlineStorage())
+ this->free_(beginNoCheck());
+
+ /* Take in the new buffer. */
+ if (aLength <= sInlineCapacity) {
+ /*
+ * We convert to inline storage if possible, even though p might
+ * otherwise be acceptable. Maybe this behaviour should be
+ * specifiable with an argument to this function.
+ */
+ mBegin = (T *)storage.addr();
+ mLength = aLength;
+ mCapacity = sInlineCapacity;
+ Impl::moveConstruct(mBegin, p, p + aLength);
+ Impl::destroy(p, p + aLength);
+ this->free_(p);
+ } else {
+ mBegin = p;
+ mLength = aLength;
+ mCapacity = aLength;
+ }
+#ifdef JS_DEBUG
+ mReserved = aLength;
+#endif
+}
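+
+/*
+ * extractRawBuffer() and replaceRawBuffer() are meant to round-trip; a
+ * minimal sketch (mutateInPlace is a hypothetical external consumer):
+ *
+ *   size_t len = v.length();
+ *   T *buf = v.extractRawBuffer();
+ *   if (!buf)
+ *       return false;
+ *   mutateInPlace(buf, len);
+ *   v.replaceRawBuffer(buf, len);      // v owns buf again, or copies it
+ *                                      // into inline storage and frees it
+ */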
+
+template <class T, size_t N, class AP>
+inline size_t
+Vector<T,N,AP>::sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf) const
+{
+ return usingInlineStorage() ? 0 : mallocSizeOf(beginNoCheck());
+}
+
+template <class T, size_t N, class AP>
+inline size_t
+Vector<T,N,AP>::sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const
+{
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+}
+
+template <class T, size_t N, class AP>
+inline void
+Vector<T,N,AP>::swap(Vector &other)
+{
+ // TODO Implement N != 0
+ JS_STATIC_ASSERT(N == 0);
+
+ // Pointer-swapping is valid only because inline storage is always empty (N == 0).
+ if (!usingInlineStorage() && other.usingInlineStorage()) {
+ other.mBegin = mBegin;
+ mBegin = inlineStorage();
+ } else if (usingInlineStorage() && !other.usingInlineStorage()) {
+ mBegin = other.mBegin;
+ other.mBegin = other.inlineStorage();
+ } else if (!usingInlineStorage() && !other.usingInlineStorage()) {
+ Swap(mBegin, other.mBegin);
+ } else {
+ // This case is a no-op, since we'd set both to use their inline storage.
+ }
+
+ Swap(mLength, other.mLength);
+ Swap(mCapacity, other.mCapacity);
+#ifdef JS_DEBUG
+ Swap(mReserved, other.mReserved);
+#endif
+}
+
+} /* namespace js */
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#endif /* js_Vector_h */