Example #1
namespace eastl
{

	/// gpEmptyBucketArray
	///
	/// A shared representation of an empty hash table. This is present so that
	/// a new empty hashtable allocates no memory. It has two entries, one for 
	/// the first lone empty (NULL) bucket, and one for the non-NULL trailing sentinel.
	/// 
	EASTL_API void* gpEmptyBucketArray[2] = { NULL, (void*)uintptr_t(~0) };



	/// gPrimeNumberArray
	///
	/// This is an array of prime numbers. It is the same set of prime
	/// numbers suggested by the C++ standard proposal; each entry is
	/// roughly 8% larger than the one before it.
	/// 
	/// To consider: Allow the user to specify their own prime number array.
	///
	const uint32_t gPrimeNumberArray[] =
	{
		2u, 3u, 5u, 7u, 11u, 13u, 17u, 19u, 23u, 29u, 31u,
		37u, 41u, 43u, 47u, 53u, 59u, 61u, 67u, 71u, 73u, 79u,
		83u, 89u, 97u, 103u, 109u, 113u, 127u, 137u, 139u, 149u,
		157u, 167u, 179u, 193u, 199u, 211u, 227u, 241u, 257u,
		277u, 293u, 313u, 337u, 359u, 383u, 409u, 439u, 467u,
		503u, 541u, 577u, 619u, 661u, 709u, 761u, 823u, 887u,
		953u, 1031u, 1109u, 1193u, 1289u, 1381u, 1493u, 1613u,
		1741u, 1879u, 2029u, 2179u, 2357u, 2549u, 2753u, 2971u,
		3209u, 3469u, 3739u, 4027u, 4349u, 4703u, 5087u, 5503u,
		5953u, 6427u, 6949u, 7517u, 8123u, 8783u, 9497u, 10273u,
		11113u, 12011u, 12983u, 14033u, 15173u, 16411u, 17749u,
		19183u, 20753u, 22447u, 24281u, 26267u, 28411u, 30727u,
		33223u, 35933u, 38873u, 42043u, 45481u, 49201u, 53201u,
		57557u, 62233u, 67307u, 72817u, 78779u, 85229u, 92203u,
		99733u, 107897u, 116731u, 126271u, 136607u, 147793u,
		159871u, 172933u, 187091u, 202409u, 218971u, 236897u,
		256279u, 277261u, 299951u, 324503u, 351061u, 379787u,
		410857u, 444487u, 480881u, 520241u, 562841u, 608903u,
		658753u, 712697u, 771049u, 834181u, 902483u, 976369u,
		1056323u, 1142821u, 1236397u, 1337629u, 1447153u, 1565659u,
		1693859u, 1832561u, 1982627u, 2144977u, 2320627u, 2510653u,
		2716249u, 2938679u, 3179303u, 3439651u, 3721303u, 4026031u,
		4355707u, 4712381u, 5098259u, 5515729u, 5967347u, 6456007u,
		6984629u, 7556579u, 8175383u, 8844859u, 9569143u, 10352717u,
		11200489u, 12117689u, 13109983u, 14183539u, 15345007u,
		16601593u, 17961079u, 19431899u, 21023161u, 22744717u,
		24607243u, 26622317u, 28802401u, 31160981u, 33712729u,
		36473443u, 39460231u, 42691603u, 46187573u, 49969847u,
		54061849u, 58488943u, 63278561u, 68460391u, 74066549u,
		80131819u, 86693767u, 93793069u, 101473717u, 109783337u,
		118773397u, 128499677u, 139022417u, 150406843u, 162723577u,
		176048909u, 190465427u, 206062531u, 222936881u, 241193053u,
		260944219u, 282312799u, 305431229u, 330442829u, 357502601u,
		386778277u, 418451333u, 452718089u, 489790921u, 529899637u,
		573292817u, 620239453u, 671030513u, 725980837u, 785430967u,
		849749479u, 919334987u, 994618837u, 1076067617u, 1164186217u,
		1259520799u, 1362662261u, 1474249943u, 1594975441u,
		1725587117u, 1866894511u, 2019773507u, 2185171673u,
		2364114217u, 2557710269u, 2767159799u, 2993761039u,
		3238918481u, 3504151727u, 3791104843u, 4101556399u,
		4294967291u,
		4294967291u // Sentinel so we don't have to test result of lowerBound
	};


	/// kPrimeCount
	///
	/// The number of prime numbers in gPrimeNumberArray.
	///
	const uint32_t kPrimeCount = (sizeof(gPrimeNumberArray) / sizeof(gPrimeNumberArray[0]) - 1);


	/// GetPrevBucketCountOnly
	/// Return a bucket count no greater than nBucketCountHint. Unlike
	/// GetPrevBucketCount, this does not update mnNextResize.
	///
	uint32_t prime_rehash_policy::GetPrevBucketCountOnly(uint32_t nBucketCountHint)
	{
		const uint32_t nPrime = *(eastl::upperBound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, nBucketCountHint) - 1);
		return nPrime;
	}


	/// GetPrevBucketCount
	/// Return a bucket count no greater than nBucketCountHint.
	/// This function has a side effect of updating mnNextResize.
	///
	uint32_t prime_rehash_policy::GetPrevBucketCount(uint32_t nBucketCountHint) const
	{
		const uint32_t nPrime = *(eastl::upperBound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, nBucketCountHint) - 1);

		mnNextResize = (uint32_t)ceilf(nPrime * mfMaxLoadFactor);
		return nPrime;
	}


	/// GetNextBucketCount
	/// Return a prime no smaller than nBucketCountHint.
	/// This function has a side effect of updating mnNextResize.
	///
	uint32_t prime_rehash_policy::GetNextBucketCount(uint32_t nBucketCountHint) const
	{
		const uint32_t nPrime = *eastl::lowerBound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, nBucketCountHint);

		mnNextResize = (uint32_t)ceilf(nPrime * mfMaxLoadFactor);
		return nPrime;
	}


	/// GetBucketCount
	/// Return the smallest prime p such that alpha p >= nElementCount, where alpha 
	/// is the load factor. This function has a side effect of updating mnNextResize.
	///
	uint32_t prime_rehash_policy::GetBucketCount(uint32_t nElementCount) const
	{
		const uint32_t nMinBucketCount = (uint32_t)(nElementCount / mfMaxLoadFactor);
		const uint32_t nPrime          = *eastl::lowerBound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, nMinBucketCount);

		mnNextResize = (uint32_t)ceilf(nPrime * mfMaxLoadFactor);
		return nPrime;
	}


	/// GetRehashRequired
	/// Finds the smallest prime p such that alpha p > nElementCount + nElementAdd.
	/// If p > nBucketCount, return pair<bool, uint32_t>(true, p); otherwise return
	/// pair<bool, uint32_t>(false, 0). In principle this isn't very different from GetBucketCount.
	/// This function has a side effect of updating mnNextResize.
	///
	eastl::pair<bool, uint32_t>
	prime_rehash_policy::GetRehashRequired(uint32_t nBucketCount, uint32_t nElementCount, uint32_t nElementAdd) const
	{
		if((nElementCount + nElementAdd) > mnNextResize) // It is significant that we specify > next resize and not >= next resize.
		{
			if(nBucketCount == 1) // We force rehashing to occur if the bucket count is < 2.
				nBucketCount = 0;

			float fMinBucketCount = (nElementCount + nElementAdd) / mfMaxLoadFactor;

			if(fMinBucketCount > (float)nBucketCount)
			{
				fMinBucketCount       = eastl::maxAlt(fMinBucketCount, mfGrowthFactor * nBucketCount);
				const uint32_t nPrime = *eastl::lowerBound(gPrimeNumberArray, gPrimeNumberArray + kPrimeCount, (uint32_t)fMinBucketCount);
				mnNextResize          = (uint32_t)ceilf(nPrime * mfMaxLoadFactor);

				return eastl::pair<bool, uint32_t>(true, nPrime);
			}
			else
			{
				mnNextResize = (uint32_t)ceilf(nBucketCount * mfMaxLoadFactor);
				return eastl::pair<bool, uint32_t>(false, (uint32_t)0);
			}
		}

		return eastl::pair<bool, uint32_t>(false, (uint32_t)0);
	}


} // namespace eastl
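
All three Get*BucketCount variants above reduce to a binary search over gPrimeNumberArray. Below is a minimal standalone sketch of the same lookup using only the standard library; kPrimes, kCount, and bucketCountFor are illustrative names (not part of EASTL), and the table is truncated for brevity:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Truncated stand-in for gPrimeNumberArray (the real table grows ~8% per entry).
static const uint32_t kPrimes[] = { 2u, 5u, 11u, 23u, 47u, 97u, 199u, 409u, 823u, 1741u };
static const uint32_t kCount = sizeof(kPrimes) / sizeof(kPrimes[0]);

// Smallest tabulated prime p with p * maxLoadFactor >= elementCount,
// mirroring GetBucketCount above but with std::lower_bound.
uint32_t bucketCountFor(uint32_t elementCount, float maxLoadFactor)
{
	const uint32_t nMinBucketCount = (uint32_t)(elementCount / maxLoadFactor);
	const uint32_t* p = std::lower_bound(kPrimes, kPrimes + kCount, nMinBucketCount);
	return (p == kPrimes + kCount) ? kPrimes[kCount - 1] : *p;
}

int main()
{
	std::printf("%u\n", bucketCountFor(100, 1.0f)); // 199, the first entry >= 100
	return 0;
}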
Example #2
/*
 * Allocate `bytes' from the current slab, aligned to kSmartSizeAlign.
 */
void* MemoryManager::slabAlloc(uint32_t bytes, unsigned index) {
  FTRACE(3, "slabAlloc({}, {})\n", bytes, index);
  size_t nbytes = debugAddExtra(smartSizeClass(bytes));

  assert(nbytes <= kSlabSize);
  assert((nbytes & kSmartSizeAlignMask) == 0);
  assert((uintptr_t(m_front) & kSmartSizeAlignMask) == 0);

  if (UNLIKELY(m_bypassSlabAlloc)) {
    // Stats correction; smartMallocSizeBig() pulls stats from jemalloc.
    m_stats.usage -= bytes;
    // smartMallocSizeBig already wraps its allocation in a debug header, but
    // the caller will try to do it again, so we need to adjust this pointer
    // before returning it.
    return ((char*)smartMallocSizeBig<false>(nbytes).ptr) - kDebugExtraSize;
  }

  void* ptr = m_front;
  {
    void* next = (void*)(uintptr_t(ptr) + nbytes);
    if (uintptr_t(next) <= uintptr_t(m_limit)) {
      m_front = next;
    } else {
      ptr = newSlab(nbytes);
    }
  }
  // Preallocate more of the same in order to amortize entry into this method.
  unsigned nPrealloc;
  if (nbytes * kSmartPreallocCountLimit <= kSmartPreallocBytesLimit) {
    nPrealloc = kSmartPreallocCountLimit;
  } else {
    nPrealloc = kSmartPreallocBytesLimit / nbytes;
  }
  {
    void* front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    if (uintptr_t(front) > uintptr_t(m_limit)) {
      nPrealloc = ((uintptr_t)m_limit - uintptr_t(m_front)) / nbytes;
      front = (void*)(uintptr_t(m_front) + nPrealloc*nbytes);
    }
    m_front = front;
  }
  for (void* p = (void*)(uintptr_t(m_front) - nbytes); p != ptr;
       p = (void*)(uintptr_t(p) - nbytes)) {
    auto usable = debugRemoveExtra(nbytes);
    auto ptr = debugPostAllocate(p, usable, usable);
    debugPreFree(ptr, usable, usable);
    m_freelists[index].push(ptr, usable);
  }
  return ptr;
}
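
The preallocation step above clamps the batch size twice: once by a byte budget and once by the space left in the slab. Below is a standalone sketch of just that arithmetic; preallocCount and its parameters are illustrative stand-ins, not the real MemoryManager API:

#include <algorithm>
#include <cstdint>

// Take up to countLimit objects of nbytes each, but no more than bytesLimit
// worth, and never past the end of the slab at `limit'.
unsigned preallocCount(uint32_t nbytes, uintptr_t front, uintptr_t limit,
                       unsigned countLimit, uint32_t bytesLimit) {
  unsigned n = (nbytes * countLimit <= bytesLimit) ? countLimit
                                                   : bytesLimit / nbytes;
  n = std::min<unsigned>(n, (limit - front) / nbytes);  // slab-space clamp
  return n;
}

int main() {
  // 64-byte objects, 256 bytes left in the slab, limits of 8 objects / 4 KiB:
  return preallocCount(64, 0, 256, 8, 4096) == 4 ? 0 : 1;
}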
    /**
     * Prints a stack backtrace for the current thread to the specified ostream.
     *
     * Does not malloc, does not throw.
     *
     * The format of the backtrace is:
     *
     * ----- BEGIN BACKTRACE -----
     * JSON backtrace
     * Human-readable backtrace
     * -----  END BACKTRACE  -----
     *
     * The JSON backtrace will be a JSON object with a "backtrace" field, and optionally others.
     * The "backtrace" field is an array, whose elements are frame objects.  A frame object has a
     * "b" field, which is the base-address of the library or executable containing the symbol, and
     * an "o" field, which is the offset into said library or executable of the symbol.
     *
     * The JSON backtrace may optionally contain additional information useful to a backtrace
     * analysis tool.  For example, on Linux it contains a subobject named "somap", describing
     * the objects referenced in the "b" fields of the "backtrace" list.
     *
     * @param os    ostream& to receive printed stack backtrace
     */
    void printStackTrace(std::ostream& os) {
        static const char unknownFileName[] = "???";
        void* addresses[maxBackTraceFrames];
        Dl_info dlinfoForFrames[maxBackTraceFrames];

        ////////////////////////////////////////////////////////////
        // Get the backtrace addresses.
        ////////////////////////////////////////////////////////////

        const int addressCount = backtrace(addresses, maxBackTraceFrames);
        if (addressCount == 0) {
            const int err = errno;
            os << "Unable to collect backtrace addresses (errno: " <<
                err << ' ' << strerror(err) << ')' << std::endl;
            return;
        }

        ////////////////////////////////////////////////////////////
        // Collect symbol information for each backtrace address.
        ////////////////////////////////////////////////////////////

        os << std::hex << std::uppercase << '\n';
        for (int i = 0; i < addressCount; ++i) {
            Dl_info& dlinfo(dlinfoForFrames[i]);
            if (!dladdr(addresses[i], &dlinfo)) {
                dlinfo.dli_fname = unknownFileName;
                dlinfo.dli_fbase = NULL;
                dlinfo.dli_sname = NULL;
                dlinfo.dli_saddr = NULL;
            }
            os << ' ' << addresses[i];
        }

        os << "\n----- BEGIN BACKTRACE -----\n";

        ////////////////////////////////////////////////////////////
        // Display the JSON backtrace
        ////////////////////////////////////////////////////////////

        os << "{\"backtrace\":[";
        for (int i = 0; i < addressCount; ++i) {
            const Dl_info& dlinfo = dlinfoForFrames[i];
            const uintptr_t fileOffset = uintptr_t(addresses[i]) - uintptr_t(dlinfo.dli_fbase);
            if (i)
                os << ',';
            os << "{\"b\":\"" << uintptr_t(dlinfo.dli_fbase) <<
                "\",\"o\":\"" << fileOffset << "\"}";
        }
        os << ']';

        if (soMapJson)
            os << ",\"processInfo\":" << *soMapJson;
        os << "}\n";

        ////////////////////////////////////////////////////////////
        // Display the human-readable trace
        ////////////////////////////////////////////////////////////
        for (int i = 0; i < addressCount; ++i) {
            Dl_info& dlinfo(dlinfoForFrames[i]);
            os << ' ';
            if (dlinfo.dli_fbase) {
                os << getBaseName(dlinfo.dli_fname) << '(';
                if (dlinfo.dli_sname) {
                    const uintptr_t offset = uintptr_t(addresses[i]) - uintptr_t(dlinfo.dli_saddr);
                    os << dlinfo.dli_sname << "+0x" << offset;
                }
                else {
                    const uintptr_t offset = uintptr_t(addresses[i]) - uintptr_t(dlinfo.dli_fbase);
                    os << "+0x" << offset;
                }
                os << ')';
            }
            else {
                os << unknownFileName;
            }
            os << " [" << addresses[i] << ']' << std::endl;
        }

        os << std::dec << std::nouppercase;
        os << "-----  END BACKTRACE  -----" << std::endl;
    }
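
The "b"/"o" frame encoding above can be reproduced with the same libc facilities. Below is a minimal standalone sketch, assuming a Linux/glibc or BSD toolchain (link with -ldl on Linux); the real function's error handling, JSON framing, and "somap" section are omitted:

#include <execinfo.h>  // backtrace
#include <dlfcn.h>     // dladdr (a glibc/BSD extension)
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
    void* addrs[8];
    const int n = backtrace(addrs, 8);
    for (int i = 0; i < n; ++i) {
        Dl_info info;
        if (dladdr(addrs[i], &info) && info.dli_fbase) {
            // "b" is the containing image's base; "o" the frame's offset into it.
            std::printf("{\"b\":\"%" PRIxPTR "\",\"o\":\"%" PRIxPTR "\"}\n",
                        (uintptr_t)info.dli_fbase,
                        (uintptr_t)addrs[i] - (uintptr_t)info.dli_fbase);
        }
    }
    return 0;
}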
uintptr_t MGL_EXPORT mgl_datac_evaluate_(uintptr_t *d, uintptr_t *idat, uintptr_t *jdat, uintptr_t *kdat, int *norm)
{	return uintptr_t(mgl_datac_evaluate(_DT_,_DA_(idat),_DA_(jdat),_DA_(kdat),*norm));	}
uintptr_t MGL_EXPORT mgl_datac_trace_(uintptr_t *d)
{	return uintptr_t(mgl_datac_trace(_DT_));	}
uintptr_t MGL_EXPORT mgl_datac_resize_box_(uintptr_t *d, int *mx,int *my,int *mz, mreal *x1,mreal *x2, mreal *y1,mreal *y2, mreal *z1,mreal *z2)
{	return uintptr_t(mgl_datac_resize_box(_DT_,*mx,*my,*mz,*x1,*x2,*y1,*y2,*z1,*z2));	}
uintptr_t MGL_EXPORT mgl_datac_sum_(uintptr_t *d, const char *dir,int l)
{	char *s=new char[l+1];	memcpy(s,dir,l);	s[l]=0;
	uintptr_t r=uintptr_t(mgl_datac_sum(_DT_,s));	delete []s;	return r;	}
 BOOST_INTRUSIVE_FORCEINLINE static void set_bits(pointer &n, std::size_t c)
 {
    BOOST_INTRUSIVE_INVARIANT_ASSERT(uintptr_t(c) <= Mask);
    n = pointer(uintptr_t((get_pointer)(n)) | uintptr_t(c));
 }
Example #9
static ssize_t h_idx_from_data(HDATA* hd)
{
	if(!pool_contains(&hpool, hd))
		WARN_RETURN(ERR::INVALID_POINTER);
	return (uintptr_t(hd) - uintptr_t(hpool.da.base))/hpool.el_size;
}
 BOOST_INTRUSIVE_FORCEINLINE static void set_pointer(pointer &n, pointer p)
 {
    BOOST_INTRUSIVE_INVARIANT_ASSERT(0 == (uintptr_t(p) & Mask));
    n = pointer(uintptr_t(p) | (uintptr_t(n) & Mask));
 }
 BOOST_INTRUSIVE_FORCEINLINE static std::size_t get_bits(pointer n)
 {  return std::size_t(uintptr_t(n) & Mask);  }
 BOOST_INTRUSIVE_FORCEINLINE static pointer get_pointer(pointer n)
 {  return pointer(uintptr_t(n) & uintptr_t(~Mask));  }
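
These accessors implement classic low-bit pointer tagging: the pointee's alignment guarantees the low bits of a valid pointer are zero, so they can carry a small tag. Below is a self-contained sketch of the same trick outside Boost.Intrusive; Node, withTag, pointerOf, and tagOf are illustrative names:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct alignas(4) Node { int v; };  // 4-byte alignment: low 2 bits are free
static const uintptr_t Mask = 3;    // tag bits a tagged pointer may carry

Node* withTag(Node* p, uintptr_t tag) { return (Node*)(uintptr_t(p) | (tag & Mask)); }
Node* pointerOf(Node* n)              { return (Node*)(uintptr_t(n) & ~Mask); }
std::size_t tagOf(Node* n)            { return std::size_t(uintptr_t(n) & Mask); }

int main()
{
    Node node{42};
    Node* tagged = withTag(&node, 2);
    std::printf("%d %zu\n", pointerOf(tagged)->v, tagOf(tagged)); // "42 2"
    return 0;
}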
Example #13
namespace internal {
	using UserTypeID = const void*;

	template <typename T>
	using StripUserType = std::remove_cv_t<T>;

	/**
	 * User type identifier
	 */
	template <typename T> extern
	const UserTypeID user_type_id = (void*) INTPTR_MAX;

	/**
	 * Registry name for a metatable which is associated with a user type
	 */
	template <typename T> extern
	const std::string user_type_reg_name =
		"UD#" + std::to_string(uintptr_t(&user_type_id<StripUserType<T>>));

	/**
	 * Register a new metatable for a user type T.
	 */
	template <typename U> static inline
	void new_user_type_metatable(State* state) {
		using T = StripUserType<U>;
		luaL_newmetatable(state, user_type_reg_name<T>.c_str());
	}

	/**
	 * Check whether the value at the given index is a user type T.
	 */
	template <typename U> static inline
	StripUserType<U>* check_user_type(State* state, int index) {
		using T = StripUserType<U>;

		return static_cast<T*>(luaL_checkudata(state, index, user_type_reg_name<T>.c_str()));
	}

	/**
	 * Apply U's metatable for the value at the top of the stack.
	 */
	template <typename U> static inline
	void apply_user_type_meta_table(State* state) {
		using T = StripUserType<U>;

		luaL_getmetatable(state, user_type_reg_name<T>.c_str());
		lua_setmetatable(state, -2);
	}

	/**
	 * Lua C function to construct a user type T with parameters A
	 */
	template <typename U, typename... A> static inline
	int construct_user_type(State* state) {
		return internal::Layout<int(A...)>::direct(
			state,
			1,
			&Value<StripUserType<U>&>::template push<A...>,
			state
		);
	}

	/**
	 * Lua C function to destruct a user type T
	 */
	template <typename U> static inline
	int destruct_user_type(State* state) {
		using T = StripUserType<U>;

		if (!lua_islightuserdata(state, 1))
			Value<T&>::read(state, 1).~T();

		return 0;
	}

	/**
	 * Create a string representation for user type T.
	 */
	template <typename U> static
	int stringify_user_type(State* state) {
		using T = StripUserType<U>;

		return Value<std::string>::push(
			state,
			internal::user_type_reg_name<T>
				+ "@"
				+ std::to_string(uintptr_t(Value<T*>::read(state, 1)))
		);
	}

	/**
	 * Helper struct for wrapping user type fields
	 */
	template <typename U, typename R>
	struct FieldWrapper {
		using T = StripUserType<U>;

		template <R T::* field_pointer> static inline
		int invoke(State* state) {
			if (lua_gettop(state) > 1) {
				// Setter
				Value<T*>::read(state, 1)->*field_pointer = Value<R>::read(state, 2);
				return 0;
			} else {
				// Getter
				return push(state, Value<T*>::read(state, 1)->*field_pointer);
			}
		}
	};

	// 'const'-qualified fields
	template <typename U, typename R>
	struct FieldWrapper<U, const R> {
		using T = StripUserType<U>;

		template <const R T::* field_pointer> static inline
		int invoke(State* state) {
			return push(state, Value<T*>::read(state, 1)->*field_pointer);
		}
	};

	template <typename T>
	struct FieldWrapperHelper {
		static_assert(
			sizeof(T) == -1,
			"Parameter to FieldWrapperHelper is not a function pointer"
		);
	};

	template <typename T, typename R>
	struct FieldWrapperHelper<R T::*>: FieldWrapper<T, R> {};

	/**
	 * Helper struct for wrapping user type methods
	 */
	template <typename T, typename S>
	struct MethodWrapper {
		static_assert(
			sizeof(T) == -1,
			"Undefined template MethodWrapper"
		);
	};

	// 'const volatile'-qualified methods
	template <typename T, typename R, typename... A>
	struct MethodWrapper<const volatile T, R(A...)> {
		using MethodPointerType = R (T::*)(A...) const volatile;
		using FunctionSignature = R (const volatile T*, A...);

		template <MethodPointerType method_pointer> static inline
		R call(const volatile T* parent, A... args) {
			return (parent->*method_pointer)(std::forward<A>(args)...);
		}

		template <MethodPointerType method_pointer> static inline
		int invoke(State* state) {
			return FunctionWrapper<FunctionSignature>::template invoke<call<method_pointer>>(state);
		}
	};

	// 'const'-qualified methods
	template <typename T, typename R, typename... A>
	struct MethodWrapper<const T, R(A...)> {
		using MethodPointerType = R (T::*)(A...) const;
		using FunctionSignature = R (const T*, A...);

		template <MethodPointerType method_pointer> static inline
		R call(const T* parent, A... args) {
			return (parent->*method_pointer)(std::forward<A>(args)...);
		}

		template <MethodPointerType method_pointer> static inline
		int invoke(State* state) {
			return FunctionWrapper<FunctionSignature>::template invoke<call<method_pointer>>(state);
		}
	};

	// 'volatile'-qualified methods
	template <typename T, typename R, typename... A>
	struct MethodWrapper<volatile T, R(A...)> {
		using MethodPointerType = R (T::*)(A...) volatile;
		using FunctionSignature = R (volatile T*, A...);

		template <MethodPointerType method_pointer> static inline
		R call(volatile T* parent, A... args) {
			return (parent->*method_pointer)(std::forward<A>(args)...);
		}

		template <MethodPointerType method_pointer> static inline
		int invoke(State* state) {
			return FunctionWrapper<FunctionSignature>::template invoke<call<method_pointer>>(state);
		}
	};

	// unqualified methods
	template <typename T, typename R, typename... A>
	struct MethodWrapper<T, R(A...)> {
		using MethodPointerType = R (T::*)(A...);
		using FunctionSignature = R (T*, A...);

		template <MethodPointerType method_pointer> static inline
		R call(T* parent, A... args) {
			return (parent->*method_pointer)(std::forward<A>(args)...);
		}

		template <MethodPointerType method_pointer> static inline
		int invoke(State* state) {
			return FunctionWrapper<FunctionSignature>::template invoke<call<method_pointer>>(state);
		}
	};

	template <typename T>
	struct MethodWrapperHelper {
		static_assert(
			sizeof(T) == -1,
			"Parameter to MethodWrapperHelper is not a function pointer"
		);
	};

	template <typename T, typename R, typename... A>
	struct MethodWrapperHelper<R (T::*)(A...) const volatile>:
		MethodWrapper<const volatile T, R(A...)>
	{};

	template <typename T, typename R, typename... A>
	struct MethodWrapperHelper<R (T::*)(A...) const>:
		MethodWrapper<const T, R(A...)>
	{};

	template <typename T, typename R, typename... A>
	struct MethodWrapperHelper<R (T::*)(A...) volatile>:
		MethodWrapper<volatile T, R(A...)>
	{};

	template <typename T, typename R, typename... A>
	struct MethodWrapperHelper<R (T::*)(A...)>:
		MethodWrapper<T, R(A...)>
	{};
}
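
The user_type_id / user_type_reg_name pair above exploits the fact that every instantiation of a templated variable has its own distinct address, yielding a process-unique registry key per user type. Below is a standalone sketch of that trick, assuming C++14 variable templates; type_tag and regName are illustrative names:

#include <cstdint>
#include <cstdio>
#include <string>

template <typename T>
const char type_tag = 0; // one distinct object (and address) per instantiated T

template <typename T>
std::string regName() {
	return "UD#" + std::to_string(uintptr_t(&type_tag<T>));
}

int main() {
	// Two different keys, because &type_tag<int> != &type_tag<double>.
	std::printf("%s\n%s\n", regName<int>().c_str(), regName<double>().c_str());
	return 0;
}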
bool isAligned(const void* const pointer, asUINT alignment)
{
	return (uintptr_t(pointer) % alignment) == 0;
}
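
A quick usage sketch of the modulo test above, with asUINT replaced by plain unsigned so the snippet stands alone:

#include <cassert>
#include <cstdint>

static bool isAligned(const void* pointer, unsigned alignment)
{
	return (uintptr_t(pointer) % alignment) == 0;
}

int main()
{
	int x = 0;
	assert(isAligned(&x, alignof(int))); // ints are always int-aligned
	return 0;
}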
uintptr_t MGL_EXPORT mgl_datac_subdata_ext_(uintptr_t *d, uintptr_t *xx, uintptr_t *yy, uintptr_t *zz)
{	return uintptr_t(mgl_datac_subdata_ext(_DT_,_DA_(xx),_DA_(yy),_DA_(zz)));	}
Example #16
// TODO: what if iterating through all handles is too slow?
Status h_reload(const PIVFS& vfs, const VfsPath& pathname)
{
	H_ScopedLock s;

	const u32 key = fnv_hash(pathname.string().c_str(), pathname.string().length()*sizeof(pathname.string()[0]));

	// destroy (note: not free!) all handles backed by this file.
	// do this before reloading any of them, because we don't specify reload
	// order (the parent resource may be reloaded first, and load the child,
	// whose original data would leak).
	for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size))
	{
		if(hd->key == 0 || hd->key != key || hd->disallow_reload)
			continue;
		hd->type->dtor(hd->user);
	}

	Status ret = INFO::OK;

	// now reload all affected handles
	size_t i = 0;
	for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size), i++)
	{
		if(hd->key == 0 || hd->key != key || hd->disallow_reload)
			continue;

		Status err = hd->type->reload(hd->user, vfs, hd->pathname, hd->h);
		// don't stop if an error is encountered - try to reload them all.
		if(err < 0)
		{
			h_free(hd->h, hd->type);
			if(ret == 0)	// don't overwrite first error
				ret = err;
		}
		else
			warn_if_invalid(hd);
	}

	return ret;
}
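
The loops above walk the handle pool at a fixed byte stride. Below is a standalone sketch of that pattern; Pool and visitAll are illustrative stand-ins, not the real pool/da API:

#include <cstddef>
#include <cstdio>

struct Pool { unsigned char* base; std::size_t pos; std::size_t el_size; };

// Entries are packed back to back, so the cursor advances el_size bytes at a time.
template <typename Visit>
void visitAll(Pool& pool, Visit visit)
{
	for(unsigned char* p = pool.base; p < pool.base + pool.pos; p += pool.el_size)
		visit(p);
}

int main()
{
	unsigned char storage[4 * 8] = {};
	Pool pool = { storage, sizeof(storage), 8 };
	visitAll(pool, [](void* p) { std::printf("%p\n", p); }); // prints 4 addresses
	return 0;
}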
uintptr_t MGL_EXPORT mgl_datac_resize_(uintptr_t *d, int *mx,int *my,int *mz)
{	return uintptr_t(mgl_datac_resize(_DT_,*mx,*my,*mz));	}
Example #18
static void Shutdown()
{
	debug_printf("H_MGR| shutdown. any handle frees after this are leaks!\n");
	// objects that store handles to other objects are destroyed before their
	// children, so the subsequent forced destruction of the child here will
	// raise a double-free warning unless we ignore it. (#860, #915, #920)
	ignoreDoubleFree = true;

	H_ScopedLock s;

	// forcibly close all open handles
	for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size))
	{
		// it's already been freed; don't free again so that this
		// doesn't look like an error.
		if(hd->key == 0)
			continue;

		// disable caching; we need to release the resource now.
		hd->keep_open = 0;
		hd->refs = 0;

		h_free_hd(hd);
	}

	pool_destroy(&hpool);
}
uintptr_t MGL_EXPORT mgl_datac_combine_(uintptr_t *a, uintptr_t *b)
{	return uintptr_t(mgl_datac_combine(_DA_(a),_DA_(b)));	}
Example #20
void h_mgr_free_type(const H_Type type)
{
	ignoreDoubleFree = true;

	H_ScopedLock s;

	// forcibly close all open handles of the specified type
	for(HDATA* hd = (HDATA*)hpool.da.base; hd < (HDATA*)(hpool.da.base + hpool.da.pos); hd = (HDATA*)(uintptr_t(hd)+hpool.el_size))
	{
		// free if not previously freed and only free the proper type
		if (hd->key == 0 || hd->type != type)
			continue;

		// disable caching; we need to release the resource now.
		hd->keep_open = 0;
		hd->refs = 0;

		h_free_hd(hd);
	}
}
uintptr_t MGL_EXPORT mgl_datac_momentum_(uintptr_t *d, char *dir, const char *how, int,int l)
{	char *s=new char[l+1];	memcpy(s,how,l);	s[l]=0;
	uintptr_t r=uintptr_t(mgl_datac_momentum(_DT_,*dir, s));	delete []s;	return r;	}
Example #22
uintptr_t MGL_EXPORT mgl_create_parser_()	{	return uintptr_t(new mglParser);	}
uintptr_t MGL_EXPORT mgl_datac_column_(uintptr_t *d, const char *eq,int l)
{	char *s=new char[l+1];	memcpy(s,eq,l);	s[l]=0;
	uintptr_t r = uintptr_t(mgl_datac_column(_DT_,s));
	delete []s;	return r;	}
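
These _()-suffixed wrappers all share one idiom: Fortran passes strings as a (char*, length) pair with no NUL terminator, so the C++ side copies into a temporary NUL-terminated buffer before calling the C API. Below is a standalone sketch; use_fortran_string is an illustrative stand-in for wrappers like mgl_datac_column_:

#include <cstring>
#include <cstdio>

void use_fortran_string(const char* eq, int l)
{
	char* s = new char[l + 1];
	std::memcpy(s, eq, l);   // copy exactly l characters...
	s[l] = 0;                // ...and NUL-terminate the copy
	std::printf("%s\n", s);  // stand-in for the real call, e.g. mgl_datac_column
	delete[] s;
}

int main()
{
	use_fortran_string("column XYZ", 6); // prints "column"
	return 0;
}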
Example #24
/*===!!! NOTE !!! You must not delete obtained data arrays !!!===============*/
uintptr_t MGL_EXPORT mgl_parser_find_var_(uintptr_t* p, const char *name, int l)
{	char *s=new char[l+1];		memcpy(s,name,l);	s[l]=0;
	mglDataA *v=_PR_->FindVar(s);	delete []s;	return uintptr_t(v);	}
/*
 * The meat of our exception handler. This thread waits for an exception
 * message, annotates the exception if needed, then forwards it to the
 * previously installed handler (which will likely terminate the process).
 */
static void
MachExceptionHandler()
{
    kern_return_t ret;
    MachExceptionParameters& current = sMachExceptionState.current;
    MachExceptionParameters& previous = sMachExceptionState.previous;

    // We use the simplest kind of 64-bit exception message here.
    ExceptionRequest64 request = {};
    request.header.msgh_local_port = current.port;
    request.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(request));
    ret = mach_msg(&request.header, MACH_RCV_MSG, 0, request.header.msgh_size,
                   current.port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);

    // Restore the previous handler. We're going to forward to it
    // anyway, and if we crash while doing so we don't want to hang.
    task_set_exception_ports(mach_task_self(), previous.mask, previous.port,
                             previous.behavior, previous.flavor);

    // If we failed even receiving the message, just give up.
    if (ret != MACH_MSG_SUCCESS)
        MOZ_CRASH("MachExceptionHandler: mach_msg failed to receive a message!");

    // Terminate the thread if we're shutting down.
    if (request.header.msgh_id == sIDQuit)
        return;

    // The only other valid message ID is the one associated with the
    // EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES behavior we chose.
    if (request.header.msgh_id != sIDRequest64)
        MOZ_CRASH("MachExceptionHandler: Unexpected Message ID!");

    // Make sure we can understand the exception we received.
    if (request.exception != EXC_BAD_ACCESS || request.code_count != 2)
        MOZ_CRASH("MachExceptionHandler: Unexpected exception type!");

    // Get the address that the offending code tried to access.
    uintptr_t address = uintptr_t(request.code[1]);

    // If the faulting address is inside one of our protected regions, we
    // want to annotate the crash to make it stand out from the crowd.
    if (sProtectedRegions.isProtected(address)) {
        ReportCrashIfDebug("Hit MOZ_CRASH(Tried to access a protected region!)\n");
        MOZ_CRASH_ANNOTATE("MOZ_CRASH(Tried to access a protected region!)");
    }

    // Forward to the previous handler which may be a debugger, the unix
    // signal handler, the crash reporter or something else entirely.
    if (previous.port != MACH_PORT_NULL) {
        mach_msg_type_number_t stateCount;
        thread_state_data_t state;
        if ((uint32_t(previous.behavior) & ~MACH_EXCEPTION_CODES) != EXCEPTION_DEFAULT) {
            // If the previous handler requested thread state, get it here.
            stateCount = THREAD_STATE_MAX;
            ret = thread_get_state(request.thread.name, previous.flavor, state, &stateCount);
            if (ret != KERN_SUCCESS)
                MOZ_CRASH("MachExceptionHandler: Could not get the thread state to forward!");
        }

        // Depending on the behavior of the previous handler, the forwarded
        // exception message will have a different set of fields.
        // Of particular note is that exception handlers that lack
        // MACH_EXCEPTION_CODES will get 32-bit fields even on 64-bit
        // systems. It appears that OSX simply truncates these fields.
        ExceptionRequestUnion forward;
        switch (uint32_t(previous.behavior)) {
          case EXCEPTION_DEFAULT:
             CopyExceptionRequest32(request, forward.r32);
             break;
          case EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES:
             CopyExceptionRequest64(request, forward.r64);
             break;
          case EXCEPTION_STATE:
             CopyExceptionRequestState32(request, forward.rs32,
                                         previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE | MACH_EXCEPTION_CODES:
             CopyExceptionRequestState64(request, forward.rs64,
                                         previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE_IDENTITY:
             CopyExceptionRequestStateIdentity32(request, forward.rsi32,
                                                 previous.flavor, stateCount, state);
             break;
          case EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES:
             CopyExceptionRequestStateIdentity64(request, forward.rsi64,
                                                 previous.flavor, stateCount, state);
             break;
          default:
             MOZ_CRASH("MachExceptionHandler: Unknown previous handler behavior!");
        }

        // Forward the generated message to the old port. The local and remote
        // port fields *and their rights* are swapped on arrival, so we need to
        // swap them back first.
        forward.header.msgh_bits = (request.header.msgh_bits & ~MACH_MSGH_BITS_PORTS_MASK) |
            MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(request.header.msgh_bits),
                           MACH_MSGH_BITS_REMOTE(request.header.msgh_bits));
        forward.header.msgh_local_port = forward.header.msgh_remote_port;
        forward.header.msgh_remote_port = previous.port;
        ret = mach_msg(&forward.header, MACH_SEND_MSG, forward.header.msgh_size, 0,
                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (ret != MACH_MSG_SUCCESS)
            MOZ_CRASH("MachExceptionHandler: Failed to forward to the previous handler!");
    } else {
        // There was no previous task-level exception handler, so defer to the
        // host level one instead. We set the return code to KERN_FAILURE to
        // indicate that we did not handle the exception.
        // The reply message ID is always the request ID + 100.
        ExceptionReply reply = {};
        reply.header.msgh_bits =
            MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.header.msgh_bits), 0);
        reply.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(reply));
        reply.header.msgh_remote_port = request.header.msgh_remote_port;
        reply.header.msgh_local_port = MACH_PORT_NULL;
        reply.header.msgh_id = request.header.msgh_id + 100;
        reply.NDR = request.NDR;
        reply.RetCode = KERN_FAILURE;
        ret = mach_msg(&reply.header, MACH_SEND_MSG, reply.header.msgh_size, 0,
                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        if (ret != MACH_MSG_SUCCESS)
            MOZ_CRASH("MachExceptionHandler: Failed to forward to the host level!");
    }
}
Example #26
uintptr_t MGL_EXPORT mgl_parser_get_var_(uintptr_t* p, unsigned long *id)
{	return uintptr_t(mgl_parser_get_var(_PR_,*id));	}
Example #27
ResourceData::ResourceData() : m_count(0) {
  assert(uintptr_t(this) % sizeof(TypedValue) == 0);
  int& pmax = *os_max_resource_id;
  if (pmax < 3) pmax = 3; // reserving 1, 2, 3 for STDIN, STDOUT, STDERR
  o_id = ++pmax;
}
//-----------------------------------------------------------------------------
uintptr_t MGL_EXPORT mgl_datac_subdata_(uintptr_t *d, int *xx,int *yy,int *zz)
{	return uintptr_t(mgl_datac_subdata(_DT_,*xx,*yy,*zz));	}
Example #29
void relocate(std::vector<TransRelocInfo>& relocs, CodeBlock& dest,
              CGMeta& fixups) {
  assertOwnsCodeLock();
  assert(!Func::s_treadmill);

  auto newRelocMapName = Debug::DebugInfo::Get()->getRelocMapName() + ".tmp";
  auto newRelocMap = fopen(newRelocMapName.c_str(), "w+");
  if (!newRelocMap) return;

  SCOPE_EXIT {
    if (newRelocMap) fclose(newRelocMap);
  };

  Func::s_treadmill = true;
  SCOPE_EXIT {
    Func::s_treadmill = false;
  };

  auto ignoreEntry = [](const SrcKey& sk) {
    // We can have entries such as UniqueStubs with no SrcKey.
    // These are OK to process.
    if (!sk.valid()) return false;
    // But if the func has been removed from the AtomicHashMap,
    // we don't want to process it.
    return !Func::isFuncIdValid(sk.funcID());
  };

  RelocationInfo rel;
  size_t num = 0;
  assert(fixups.alignments.empty());
  for (size_t sz = relocs.size(); num < sz; num++) {
    auto& reloc = relocs[num];
    if (ignoreEntry(reloc.sk)) continue;
    auto start DEBUG_ONLY = dest.frontier();
    try {
      x64::relocate(rel, dest,
                    reloc.start, reloc.end, reloc.fixups, nullptr);
    } catch (const DataBlockFull& dbf) {
      break;
    }
    if (Trace::moduleEnabledRelease(Trace::mcg, 1)) {
      Trace::traceRelease(
        folly::sformat("Relocate: 0x{:08x}+0x{:04x} => 0x{:08x}+0x{:04x}\n",
                       (uintptr_t)reloc.start, reloc.end - reloc.start,
                       (uintptr_t)start, dest.frontier() - start));
    }
  }
  swap_trick(fixups.alignments);
  assert(fixups.empty());

  x64::adjustForRelocation(rel);

  for (size_t i = 0; i < num; i++) {
    if (!ignoreEntry(relocs[i].sk)) {
      x64::adjustMetaDataForRelocation(rel, nullptr, relocs[i].fixups);
    }
  }

  for (size_t i = 0; i < num; i++) {
    if (!ignoreEntry(relocs[i].sk)) {
      relocs[i].fixups.process_only(nullptr);
    }
  }

  // At this point, all the relocated code should be correct and runnable. But
  // if, for example, it has unlikely paths into cold code that has not been
  // relocated, the cold code will still point back to the original versions,
  // not the relocated ones. Similarly, reusable stubs will still point to the
  // old code. Since we can now execute the relocated code, it's OK to start
  // fixing these things now.

  for (auto& it : srcDB()) {
    it.second->relocate(rel);
  }

  std::unordered_set<Func*> visitedFuncs;
  CodeSmasher s;
  for (size_t i = 0; i < num; i++) {
    auto& reloc = relocs[i];
    if (ignoreEntry(reloc.sk)) continue;
    for (auto& ib : reloc.incomingBranches) {
      ib.relocate(rel);
    }
    if (!reloc.sk.valid()) continue;
    auto f = const_cast<Func*>(reloc.sk.func());

    x64::adjustCodeForRelocation(rel, reloc.fixups);
    reloc.fixups.clear();

    // fixup code references in the corresponding cold block to point
    // to the new code
    x64::adjustForRelocation(rel, reloc.coldStart, reloc.coldEnd);

    if (visitedFuncs.insert(f).second) {
      if (auto adjusted = rel.adjustedAddressAfter(f->getFuncBody())) {
        f->setFuncBody(adjusted);
      }
      int num = Func::getMaxNumPrologues(f->numParams());
      if (num < kNumFixedPrologues) num = kNumFixedPrologues;
      while (num--) {
        auto addr = f->getPrologue(num);
        if (auto adjusted = rel.adjustedAddressAfter(addr)) {
          f->setPrologue(num, adjusted);
        }
      }
    }
    if (reloc.end != reloc.start) {
      s.entries.emplace_back(reloc.start, reloc.end);
    }
  }

  auto relocMap = Debug::DebugInfo::Get()->getRelocMap();
  always_assert(relocMap);
  fseek(relocMap, 0, SEEK_SET);

  auto deadStubs = getFreeTCStubs();
  auto param = PostProcessParam { rel, deadStubs, newRelocMap };
  std::set<TCA> liveStubs;
  readRelocations(relocMap, &liveStubs, postProcess, &param);

  // ensure that any reusable stubs are updated for the relocated code
  for (auto stub : liveStubs) {
    FTRACE(1, "Stub: 0x{:08x}\n", (uintptr_t)stub);
    fixups.reusedStubs.emplace_back(stub);
    always_assert(!rel.adjustedAddressAfter(stub));
    fprintf(newRelocMap, "%" PRIxPTR " 0 %s\n", uintptr_t(stub), "NewStub");
  }
  x64::adjustCodeForRelocation(rel, fixups);

  unlink(Debug::DebugInfo::Get()->getRelocMapName().c_str());
  rename(newRelocMapName.c_str(),
         Debug::DebugInfo::Get()->getRelocMapName().c_str());
  fclose(newRelocMap);
  newRelocMap = nullptr;
  freopen(Debug::DebugInfo::Get()->getRelocMapName().c_str(), "r+", relocMap);
  fseek(relocMap, 0, SEEK_END);

  okToRelocate = false;
  Treadmill::enqueue(std::move(s));
}