update libcxxabi to llvm 14.0.6

Andrew Kelley 2022-07-01 16:36:40 -07:00
parent bd680139d0
commit aa964bd555
31 changed files with 1174 additions and 1190 deletions


@ -1,4 +1,4 @@
//===-------------------------- __cxxabi_config.h -------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -93,7 +93,7 @@
# if !__has_feature(cxx_exceptions)
# define _LIBCXXABI_NO_EXCEPTIONS
# endif
#elif defined(_LIBCXXABI_COMPILER_GCC) && !__EXCEPTIONS
#elif defined(_LIBCXXABI_COMPILER_GCC) && !defined(__EXCEPTIONS)
# define _LIBCXXABI_NO_EXCEPTIONS
#endif
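For context on the guard change above: when GCC runs with -fno-exceptions it does not define __EXCEPTIONS at all, so `!__EXCEPTIONS` relies on the preprocessor treating an undefined identifier as 0 and warns under -Wundef, while `!defined(__EXCEPTIONS)` is an explicit feature test. A minimal illustration of the pattern (DEMO_NO_EXCEPTIONS is a hypothetical macro, not part of libc++abi):

// Compile with and without -fno-exceptions to see the value flip.
#if defined(__GNUC__) && !defined(__EXCEPTIONS)
#  define DEMO_NO_EXCEPTIONS 1
#else
#  define DEMO_NO_EXCEPTIONS 0
#endif

#include <cstdio>

int main() {
    std::printf("exceptions %s\n", DEMO_NO_EXCEPTIONS ? "disabled" : "enabled");
}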


@ -1,4 +1,4 @@
//===--------------------------- cxxabi.h ---------------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===------------------------- abort_message.cpp --------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===-------------------------- abort_message.h-----------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===------------------------ cxa_aux_runtime.cpp -------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,11 +1,12 @@
//===------------------------- cxa_default_handlers.cpp -------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//
// This file implements the default terminate_handler and unexpected_handler.
// This file implements the default terminate_handler, unexpected_handler and
// new_handler.
//===----------------------------------------------------------------------===//
#include <exception>
@ -15,7 +16,7 @@
#include "cxa_handlers.h"
#include "cxa_exception.h"
#include "private_typeinfo.h"
#include "include/atomic_support.h"
#include "include/atomic_support.h" // from libc++
#if !defined(LIBCXXABI_SILENT_TERMINATE)
@ -104,6 +105,9 @@ _LIBCPP_SAFE_STATIC std::terminate_handler __cxa_terminate_handler = default_ter
_LIBCXXABI_DATA_VIS
_LIBCPP_SAFE_STATIC std::unexpected_handler __cxa_unexpected_handler = default_unexpected_handler;
_LIBCXXABI_DATA_VIS
_LIBCPP_SAFE_STATIC std::new_handler __cxa_new_handler = 0;
namespace std
{
@ -125,4 +129,10 @@ set_terminate(terminate_handler func) noexcept
_AO_Acq_Rel);
}
new_handler
set_new_handler(new_handler handler) noexcept
{
return __libcpp_atomic_exchange(&__cxa_new_handler, handler, _AO_Acq_Rel);
}
}
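Since the default new_handler state now lives here, a short usage sketch of the public API it backs (standard <new> declarations; on_allocation_failure is an illustrative handler name):

#include <new>
#include <cstdio>
#include <cstdlib>

// A new_handler must either free up memory, throw std::bad_alloc, or not
// return; this one reports the failure and terminates.
static void on_allocation_failure() {
    std::fputs("operator new failed\n", stderr);
    std::abort();
}

int main() {
    std::new_handler previous = std::set_new_handler(on_allocation_failure);
    (void)previous; // previously installed handler, null by default
    // A failing operator new will now call on_allocation_failure before giving up.
}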


@ -1,4 +1,4 @@
//===-------------------------- cxa_demangle.cpp --------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -342,21 +342,21 @@ __cxa_demangle(const char *MangledName, char *Buf, size_t *N, int *Status) {
int InternalStatus = demangle_success;
Demangler Parser(MangledName, MangledName + std::strlen(MangledName));
OutputStream S;
OutputBuffer O;
Node *AST = Parser.parse();
if (AST == nullptr)
InternalStatus = demangle_invalid_mangled_name;
else if (!initializeOutputStream(Buf, N, S, 1024))
else if (!initializeOutputBuffer(Buf, N, O, 1024))
InternalStatus = demangle_memory_alloc_failure;
else {
assert(Parser.ForwardTemplateRefs.empty());
AST->print(S);
S += '\0';
AST->print(O);
O += '\0';
if (N != nullptr)
*N = S.getCurrentPosition();
Buf = S.getBuffer();
*N = O.getCurrentPosition();
Buf = O.getBuffer();
}
if (Status)
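For reference, the entry point patched above is used like this (a minimal sketch; the mangled name is an arbitrary example, and the caller owns the malloc'd result when Buf is null):

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
    int status = 0;
    // Demangles to std::vector<int, std::allocator<int> >::push_back(int const&).
    char *demangled = abi::__cxa_demangle("_ZNSt6vectorIiSaIiEE9push_backERKi",
                                          nullptr, nullptr, &status);
    if (status == 0 && demangled != nullptr) {
        std::printf("%s\n", demangled);
        std::free(demangled);
    }
    return status;
}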


@ -1,4 +1,4 @@
//===------------------------- cxa_exception.cpp --------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -17,7 +17,7 @@
#include "cxa_exception.h"
#include "cxa_handlers.h"
#include "fallback_malloc.h"
#include "include/atomic_support.h"
#include "include/atomic_support.h" // from libc++
#if __has_feature(address_sanitizer)
#include <sanitizer/asan_interface.h>
@ -341,8 +341,10 @@ unwinding with _Unwind_Resume.
According to ARM EHABI 8.4.1, __cxa_end_cleanup() should not clobber any
register, thus we have to write this function in assembly so that we can save
{r1, r2, r3}. We don't have to save r0 because it is the return value and the
first argument to _Unwind_Resume(). In addition, we are saving r4 in order to
align the stack to 16 bytes, even though it is a callee-save register.
first argument to _Unwind_Resume(). In addition, we are saving lr in order to
align the stack to 16 bytes and lr will be used to identify the caller and its
frame information. _Unwind_Resume never returns, and we need to keep the original
lr, so we just branch to it.
*/
__attribute__((used)) static _Unwind_Exception *
__cxa_end_cleanup_impl()
@ -372,18 +374,24 @@ __cxa_end_cleanup_impl()
return &exception_header->unwindHeader;
}
asm (
" .pushsection .text.__cxa_end_cleanup,\"ax\",%progbits\n"
asm(" .pushsection .text.__cxa_end_cleanup,\"ax\",%progbits\n"
" .globl __cxa_end_cleanup\n"
" .type __cxa_end_cleanup,%function\n"
"__cxa_end_cleanup:\n"
" push {r1, r2, r3, r4}\n"
#if defined(__ARM_FEATURE_BTI_DEFAULT)
" bti\n"
#endif
" push {r1, r2, r3, lr}\n"
" bl __cxa_end_cleanup_impl\n"
" pop {r1, r2, r3, r4}\n"
" bl _Unwind_Resume\n"
" bl abort\n"
" .popsection"
);
" mov lr, r4\n"
#if defined(LIBCXXABI_BAREMETAL)
" ldr r4, =_Unwind_Resume\n"
" bx r4\n"
#else
" b _Unwind_Resume\n"
#endif
" .popsection");
#endif // defined(_LIBCXXABI_ARM_EHABI)
/*


@ -1,4 +1,4 @@
//===------------------------- cxa_exception.h ----------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===--------------------- cxa_exception_storage.cpp ----------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -21,25 +21,24 @@ extern "C" {
static __cxa_eh_globals eh_globals;
__cxa_eh_globals *__cxa_get_globals() { return &eh_globals; }
__cxa_eh_globals *__cxa_get_globals_fast() { return &eh_globals; }
}
}
} // extern "C"
} // namespace __cxxabiv1
#elif defined(HAS_THREAD_LOCAL)
namespace __cxxabiv1 {
namespace {
__cxa_eh_globals * __globals () {
__cxa_eh_globals *__globals() {
static thread_local __cxa_eh_globals eh_globals;
return &eh_globals;
}
}
} // namespace
extern "C" {
__cxa_eh_globals * __cxa_get_globals () { return __globals (); }
__cxa_eh_globals * __cxa_get_globals_fast () { return __globals (); }
}
}
__cxa_eh_globals *__cxa_get_globals() { return __globals(); }
__cxa_eh_globals *__cxa_get_globals_fast() { return __globals(); }
} // extern "C"
} // namespace __cxxabiv1
#else
@ -59,47 +58,46 @@ namespace {
std::__libcpp_tls_key key_;
std::__libcpp_exec_once_flag flag_ = _LIBCPP_EXEC_ONCE_INITIALIZER;
void _LIBCPP_TLS_DESTRUCTOR_CC destruct_ (void *p) {
__free_with_fallback ( p );
if ( 0 != std::__libcpp_tls_set ( key_, NULL ) )
void _LIBCPP_TLS_DESTRUCTOR_CC destruct_(void *p) {
__free_with_fallback(p);
if (0 != std::__libcpp_tls_set(key_, NULL))
abort_message("cannot zero out thread value for __cxa_get_globals()");
}
}
void construct_ () {
if ( 0 != std::__libcpp_tls_create ( &key_, destruct_ ) )
void construct_() {
if (0 != std::__libcpp_tls_create(&key_, destruct_))
abort_message("cannot create thread specific key for __cxa_get_globals()");
}
}
}
} // namespace
extern "C" {
__cxa_eh_globals * __cxa_get_globals () {
// Try to get the globals for this thread
__cxa_eh_globals* retVal = __cxa_get_globals_fast ();
__cxa_eh_globals *__cxa_get_globals() {
// Try to get the globals for this thread
__cxa_eh_globals *retVal = __cxa_get_globals_fast();
// If this is the first time we've been asked for these globals, create them
if ( NULL == retVal ) {
retVal = static_cast<__cxa_eh_globals*>
(__calloc_with_fallback (1, sizeof (__cxa_eh_globals)));
if ( NULL == retVal )
// If this is the first time we've been asked for these globals, create them
if (NULL == retVal) {
retVal = static_cast<__cxa_eh_globals*>(
__calloc_with_fallback(1, sizeof(__cxa_eh_globals)));
if (NULL == retVal)
abort_message("cannot allocate __cxa_eh_globals");
if ( 0 != std::__libcpp_tls_set ( key_, retVal ) )
if (0 != std::__libcpp_tls_set(key_, retVal))
abort_message("std::__libcpp_tls_set failure in __cxa_get_globals()");
}
return retVal;
}
return retVal;
}
// Note that this implementation will reliably return NULL if not
// preceded by a call to __cxa_get_globals(). This is an extension
// to the Itanium ABI and is taken advantage of in several places in
// libc++abi.
__cxa_eh_globals * __cxa_get_globals_fast () {
// First time through, create the key.
__cxa_eh_globals *__cxa_get_globals_fast() {
// First time through, create the key.
if (0 != std::__libcpp_execute_once(&flag_, construct_))
abort_message("execute once failure in __cxa_get_globals_fast()");
// static int init = construct_();
return static_cast<__cxa_eh_globals*>(std::__libcpp_tls_get(key_));
}
}
} // extern "C"
} // namespace __cxxabiv1
}
}
#endif
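The NULL-returning extension noted above lets callers inspect per-thread exception state without ever allocating; a minimal sketch of that pattern, mirroring what __cxa_uncaught_exceptions does (declarations assumed to come from cxa_exception.h):

#include "cxa_exception.h"

unsigned int uncaught_exceptions_sketch() {
    // May be NULL if __cxa_get_globals() has never run on this thread;
    // that simply means no exception is in flight.
    __cxxabiv1::__cxa_eh_globals *globals = __cxxabiv1::__cxa_get_globals_fast();
    return globals != nullptr ? globals->uncaughtExceptions : 0;
}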


@ -1,4 +1,4 @@
//===---------------------------- cxa_guard.cpp ---------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -23,9 +23,15 @@
* the thread currently performing initialization is stored in the second word.
*
* Guard Object Layout:
* -------------------------------------------------------------------------
* |a: guard byte | a+1: init byte | a+2 : unused ... | a+4: thread-id ... |
* ------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* | a+0: guard byte | a+1: init byte | a+2: unused ... | a+4: thread-id ... |
* ---------------------------------------------------------------------------
*
* Note that we don't do what the ABI docs suggest (put a mutex in the guard
* object which we acquire in cxa_guard_acquire and release in
* cxa_guard_release). Instead we use the init byte to imitate that behaviour,
* but without actually holding anything mutex related between acquire and
* release/abort.
*
* Access Protocol:
* For each implementation the guard byte is checked and set before accessing
@ -38,28 +44,31 @@
*/
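To ground the layout and protocol described above, this is roughly how a compiler lowers a guarded function-local static onto these entry points (an illustrative sketch; compute() is a placeholder initializer, and the guard width, mangling, and exact lowering are target-dependent):

int compute() { return 42; } // stand-in for a possibly-throwing initializer

int &lazy_value() {
    static int value = compute(); // protected by a hidden guard object laid out as above
    return value;
    // Conceptual lowering emitted by the compiler:
    //   if (__cxa_guard_acquire(&guard)) {     // non-zero while the guard byte is still unset
    //       try {
    //           value = compute();
    //           __cxa_guard_release(&guard);   // sets the guard byte, wakes any waiters
    //       } catch (...) {
    //           __cxa_guard_abort(&guard);     // resets the init byte so another thread may retry
    //           throw;
    //       }
    //   }
}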
#include "__cxxabi_config.h"
#include "include/atomic_support.h"
#include <unistd.h>
#include "include/atomic_support.h" // from libc++
#if defined(__has_include)
# if __has_include(<sys/syscall.h>)
# include <sys/syscall.h>
# endif
# if __has_include(<sys/syscall.h>)
# include <sys/syscall.h>
# endif
# if __has_include(<unistd.h>)
# include <unistd.h>
# endif
#endif
#include <limits.h>
#include <stdlib.h>
#include <__threading_support>
#ifndef _LIBCXXABI_HAS_NO_THREADS
#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
# if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
# pragma comment(lib, "pthread")
# endif
#endif
#if defined(__clang__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wtautological-pointer-compare"
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wtautological-pointer-compare"
#elif defined(__GNUC__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Waddress"
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Waddress"
#endif
// To make testing possible, this header is included from both cxa_guard.cpp
@ -74,20 +83,20 @@
// defined when including this file. Only `src/cxa_guard.cpp` should define
// the former.
#ifdef BUILDING_CXA_GUARD
# include "abort_message.h"
# define ABORT_WITH_MESSAGE(...) ::abort_message(__VA_ARGS__)
# include "abort_message.h"
# define ABORT_WITH_MESSAGE(...) ::abort_message(__VA_ARGS__)
#elif defined(TESTING_CXA_GUARD)
# define ABORT_WITH_MESSAGE(...) ::abort()
# define ABORT_WITH_MESSAGE(...) ::abort()
#else
# error "Either BUILDING_CXA_GUARD or TESTING_CXA_GUARD must be defined"
# error "Either BUILDING_CXA_GUARD or TESTING_CXA_GUARD must be defined"
#endif
#if __has_feature(thread_sanitizer)
extern "C" void __tsan_acquire(void*);
extern "C" void __tsan_release(void*);
#else
#define __tsan_acquire(addr) ((void)0)
#define __tsan_release(addr) ((void)0)
# define __tsan_acquire(addr) ((void)0)
# define __tsan_release(addr) ((void)0)
#endif
namespace __cxxabiv1 {
@ -99,7 +108,7 @@ namespace {
// Misc Utilities
//===----------------------------------------------------------------------===//
template <class T, T(*Init)()>
template <class T, T (*Init)()>
struct LazyValue {
LazyValue() : is_init(false) {}
@ -110,7 +119,8 @@ struct LazyValue {
}
return value;
}
private:
private:
T value;
bool is_init = false;
};
@ -120,25 +130,19 @@ class AtomicInt {
public:
using MemoryOrder = std::__libcpp_atomic_order;
explicit AtomicInt(IntType *b) : b_(b) {}
explicit AtomicInt(IntType* b) : b_(b) {}
AtomicInt(AtomicInt const&) = delete;
AtomicInt& operator=(AtomicInt const&) = delete;
IntType load(MemoryOrder ord) {
return std::__libcpp_atomic_load(b_, ord);
}
void store(IntType val, MemoryOrder ord) {
std::__libcpp_atomic_store(b_, val, ord);
}
IntType exchange(IntType new_val, MemoryOrder ord) {
return std::__libcpp_atomic_exchange(b_, new_val, ord);
}
bool compare_exchange(IntType *expected, IntType desired, MemoryOrder ord_success, MemoryOrder ord_failure) {
IntType load(MemoryOrder ord) { return std::__libcpp_atomic_load(b_, ord); }
void store(IntType val, MemoryOrder ord) { std::__libcpp_atomic_store(b_, val, ord); }
IntType exchange(IntType new_val, MemoryOrder ord) { return std::__libcpp_atomic_exchange(b_, new_val, ord); }
bool compare_exchange(IntType* expected, IntType desired, MemoryOrder ord_success, MemoryOrder ord_failure) {
return std::__libcpp_atomic_compare_exchange(b_, expected, desired, ord_success, ord_failure);
}
private:
IntType *b_;
IntType* b_;
};
//===----------------------------------------------------------------------===//
@ -148,8 +152,7 @@ private:
#if defined(__APPLE__) && defined(_LIBCPP_HAS_THREAD_API_PTHREAD)
uint32_t PlatformThreadID() {
static_assert(sizeof(mach_port_t) == sizeof(uint32_t), "");
return static_cast<uint32_t>(
pthread_mach_thread_np(std::__libcpp_thread_get_current_id()));
return static_cast<uint32_t>(pthread_mach_thread_np(std::__libcpp_thread_get_current_id()));
}
#elif defined(SYS_gettid) && defined(_LIBCPP_HAS_THREAD_API_PTHREAD)
uint32_t PlatformThreadID() {
@ -160,99 +163,108 @@ uint32_t PlatformThreadID() {
constexpr uint32_t (*PlatformThreadID)() = nullptr;
#endif
constexpr bool PlatformSupportsThreadID() {
return +PlatformThreadID != nullptr;
}
//===----------------------------------------------------------------------===//
// GuardBase
// GuardByte
//===----------------------------------------------------------------------===//
enum class AcquireResult {
INIT_IS_DONE,
INIT_IS_PENDING,
};
constexpr AcquireResult INIT_IS_DONE = AcquireResult::INIT_IS_DONE;
constexpr AcquireResult INIT_IS_PENDING = AcquireResult::INIT_IS_PENDING;
static constexpr uint8_t UNSET = 0;
static constexpr uint8_t COMPLETE_BIT = (1 << 0);
static constexpr uint8_t PENDING_BIT = (1 << 1);
static constexpr uint8_t WAITING_BIT = (1 << 2);
template <class Derived>
struct GuardObject {
GuardObject() = delete;
GuardObject(GuardObject const&) = delete;
GuardObject& operator=(GuardObject const&) = delete;
/// Manages reads and writes to the guard byte.
struct GuardByte {
GuardByte() = delete;
GuardByte(GuardByte const&) = delete;
GuardByte& operator=(GuardByte const&) = delete;
explicit GuardObject(uint32_t* g)
: base_address(g), guard_byte_address(reinterpret_cast<uint8_t*>(g)),
init_byte_address(reinterpret_cast<uint8_t*>(g) + 1),
thread_id_address(nullptr) {}
explicit GuardObject(uint64_t* g)
: base_address(g), guard_byte_address(reinterpret_cast<uint8_t*>(g)),
init_byte_address(reinterpret_cast<uint8_t*>(g) + 1),
thread_id_address(reinterpret_cast<uint32_t*>(g) + 1) {}
explicit GuardByte(uint8_t* const guard_byte_address) : guard_byte(guard_byte_address) {}
public:
/// Implements __cxa_guard_acquire
AcquireResult cxa_guard_acquire() {
AtomicInt<uint8_t> guard_byte(guard_byte_address);
if (guard_byte.load(std::_AO_Acquire) != UNSET)
return INIT_IS_DONE;
return derived()->acquire_init_byte();
/// The guard byte portion of cxa_guard_acquire. Returns true if
/// initialization has already been completed.
bool acquire() {
// if guard_byte is non-zero, we have already completed initialization
// (i.e. release has been called)
return guard_byte.load(std::_AO_Acquire) != UNSET;
}
/// Implements __cxa_guard_release
void cxa_guard_release() {
AtomicInt<uint8_t> guard_byte(guard_byte_address);
// Store complete first, so that when release wakes other folks, they see
// it as having been completed.
guard_byte.store(COMPLETE_BIT, std::_AO_Release);
derived()->release_init_byte();
}
/// The guard byte portion of cxa_guard_release.
void release() { guard_byte.store(COMPLETE_BIT, std::_AO_Release); }
/// Implements __cxa_guard_abort
void cxa_guard_abort() { derived()->abort_init_byte(); }
public:
/// base_address - the address of the original guard object.
void* const base_address;
/// The address of the guard byte at offset 0.
uint8_t* const guard_byte_address;
/// The address of the byte used by the implementation during initialization.
uint8_t* const init_byte_address;
/// An optional address storing an identifier for the thread performing initialization.
/// It's used to detect recursive initialization.
uint32_t* const thread_id_address;
/// The guard byte portion of cxa_guard_abort.
void abort() {} // Nothing to do
private:
Derived* derived() { return static_cast<Derived*>(this); }
AtomicInt<uint8_t> guard_byte;
};
//===----------------------------------------------------------------------===//
// InitByte Implementations
//===----------------------------------------------------------------------===//
//
// Each initialization byte implementation supports the following methods:
//
// InitByte(uint8_t* _init_byte_address, uint32_t* _thread_id_address)
// Construct the InitByte object, initializing our member variables
//
// bool acquire()
// Called before we start the initialization. Check if someone else has already started and, if
// not, signal our intent to start it ourselves. We determine the current status from the init
// byte, which is one of 4 possible values:
// COMPLETE: Initialization was finished by somebody else. Return true.
// PENDING: Somebody has started the initialization already, set the WAITING bit,
// then wait for the init byte to get updated with a new value.
// (PENDING|WAITING): Somebody has started the initialization already, and we're not the
// first one waiting. Wait for the init byte to get updated.
// UNSET: Initialization hasn't successfully completed, and nobody is currently
// performing the initialization. Set the PENDING bit to indicate our
// intention to start the initialization, and return false.
// The return value indicates whether initialization has already been completed.
//
// void release()
// Called after successfully completing the initialization. Update the init byte to reflect
// that, then if anybody else is waiting, wake them up.
//
// void abort()
// Called after an error is thrown during the initialization. Reset the init byte to UNSET to
// indicate that we're no longer performing the initialization, then if anybody is waiting, wake
// them up so they can try performing the initialization.
//
//===----------------------------------------------------------------------===//
// Single Threaded Implementation
//===----------------------------------------------------------------------===//
struct InitByteNoThreads : GuardObject<InitByteNoThreads> {
using GuardObject::GuardObject;
/// InitByteNoThreads - Doesn't use any inter-thread synchronization when
/// managing reads and writes to the init byte.
struct InitByteNoThreads {
InitByteNoThreads() = delete;
InitByteNoThreads(InitByteNoThreads const&) = delete;
InitByteNoThreads& operator=(InitByteNoThreads const&) = delete;
AcquireResult acquire_init_byte() {
explicit InitByteNoThreads(uint8_t* _init_byte_address, uint32_t*) : init_byte_address(_init_byte_address) {}
/// The init byte portion of cxa_guard_acquire. Returns true if
/// initialization has already been completed.
bool acquire() {
if (*init_byte_address == COMPLETE_BIT)
return INIT_IS_DONE;
return true;
if (*init_byte_address & PENDING_BIT)
ABORT_WITH_MESSAGE("__cxa_guard_acquire detected recursive initialization");
*init_byte_address = PENDING_BIT;
return INIT_IS_PENDING;
return false;
}
void release_init_byte() { *init_byte_address = COMPLETE_BIT; }
void abort_init_byte() { *init_byte_address = UNSET; }
};
/// The init byte portion of cxa_guard_release.
void release() { *init_byte_address = COMPLETE_BIT; }
/// The init byte portion of cxa_guard_abort.
void abort() { *init_byte_address = UNSET; }
private:
/// The address of the byte used during initialization.
uint8_t* const init_byte_address;
};
//===----------------------------------------------------------------------===//
// Global Mutex Implementation
@ -280,9 +292,7 @@ struct LibcppCondVar {
LibcppCondVar(LibcppCondVar const&) = delete;
LibcppCondVar& operator=(LibcppCondVar const&) = delete;
bool wait(LibcppMutex& mut) {
return std::__libcpp_condvar_wait(&cond, &mut.mutex);
}
bool wait(LibcppMutex& mut) { return std::__libcpp_condvar_wait(&cond, &mut.mutex); }
bool broadcast() { return std::__libcpp_condvar_broadcast(&cond); }
private:
@ -293,28 +303,25 @@ struct LibcppMutex {};
struct LibcppCondVar {};
#endif // !defined(_LIBCXXABI_HAS_NO_THREADS)
/// InitByteGlobalMutex - Uses a global mutex and condition variable (common to
/// all static local variables) to manage reads and writes to the init byte.
template <class Mutex, class CondVar, Mutex& global_mutex, CondVar& global_cond,
uint32_t (*GetThreadID)() = PlatformThreadID>
struct InitByteGlobalMutex
: GuardObject<InitByteGlobalMutex<Mutex, CondVar, global_mutex, global_cond,
GetThreadID>> {
struct InitByteGlobalMutex {
using BaseT = typename InitByteGlobalMutex::GuardObject;
using BaseT::BaseT;
explicit InitByteGlobalMutex(uint32_t *g)
: BaseT(g), has_thread_id_support(false) {}
explicit InitByteGlobalMutex(uint64_t *g)
: BaseT(g), has_thread_id_support(PlatformSupportsThreadID()) {}
explicit InitByteGlobalMutex(uint8_t* _init_byte_address, uint32_t* _thread_id_address)
: init_byte_address(_init_byte_address), thread_id_address(_thread_id_address),
has_thread_id_support(_thread_id_address != nullptr && GetThreadID != nullptr) {}
public:
AcquireResult acquire_init_byte() {
/// The init byte portion of cxa_guard_acquire. Returns true if
/// initialization has already been completed.
bool acquire() {
LockGuard g("__cxa_guard_acquire");
// Check for possible recursive initialization.
if (has_thread_id_support && (*init_byte_address & PENDING_BIT)) {
if (*thread_id_address == current_thread_id.get())
ABORT_WITH_MESSAGE("__cxa_guard_acquire detected recursive initialization");
ABORT_WITH_MESSAGE("__cxa_guard_acquire detected recursive initialization");
}
// Wait until the pending bit is not set.
@ -324,16 +331,17 @@ public:
}
if (*init_byte_address == COMPLETE_BIT)
return INIT_IS_DONE;
return true;
if (has_thread_id_support)
*thread_id_address = current_thread_id.get();
*init_byte_address = PENDING_BIT;
return INIT_IS_PENDING;
return false;
}
void release_init_byte() {
/// The init byte portion of cxa_guard_release.
void release() {
bool has_waiting;
{
LockGuard g("__cxa_guard_release");
@ -347,7 +355,8 @@ public:
}
}
void abort_init_byte() {
/// The init byte portion of cxa_guard_abort.
void abort() {
bool has_waiting;
{
LockGuard g("__cxa_guard_abort");
@ -364,8 +373,12 @@ public:
}
private:
using BaseT::init_byte_address;
using BaseT::thread_id_address;
/// The address of the byte used during initialization.
uint8_t* const init_byte_address;
/// An optional address storing an identifier for the thread performing initialization.
/// It's used to detect recursive initialization.
uint32_t* const thread_id_address;
const bool has_thread_id_support;
LazyValue<uint32_t, GetThreadID> current_thread_id;
@ -375,8 +388,7 @@ private:
LockGuard(LockGuard const&) = delete;
LockGuard& operator=(LockGuard const&) = delete;
explicit LockGuard(const char* calling_func)
: calling_func_(calling_func) {
explicit LockGuard(const char* calling_func) : calling_func_(calling_func) {
if (global_mutex.lock())
ABORT_WITH_MESSAGE("%s failed to acquire mutex", calling_func_);
}
@ -411,50 +423,40 @@ constexpr void (*PlatformFutexWait)(int*, int) = nullptr;
constexpr void (*PlatformFutexWake)(int*) = nullptr;
#endif
constexpr bool PlatformSupportsFutex() {
return +PlatformFutexWait != nullptr;
}
constexpr bool PlatformSupportsFutex() { return +PlatformFutexWait != nullptr; }
/// InitByteFutex - Manages initialization using atomics and the futex syscall
/// for waiting and waking.
template <void (*Wait)(int*, int) = PlatformFutexWait,
void (*Wake)(int*) = PlatformFutexWake,
/// InitByteFutex - Uses a futex to manage reads and writes to the init byte.
template <void (*Wait)(int*, int) = PlatformFutexWait, void (*Wake)(int*) = PlatformFutexWake,
uint32_t (*GetThreadIDArg)() = PlatformThreadID>
struct InitByteFutex : GuardObject<InitByteFutex<Wait, Wake, GetThreadIDArg>> {
using BaseT = typename InitByteFutex::GuardObject;
struct InitByteFutex {
/// ARM Constructor
explicit InitByteFutex(uint32_t *g) : BaseT(g),
init_byte(this->init_byte_address),
has_thread_id_support(this->thread_id_address && GetThreadIDArg),
thread_id(this->thread_id_address) {}
/// Itanium Constructor
explicit InitByteFutex(uint64_t *g) : BaseT(g),
init_byte(this->init_byte_address),
has_thread_id_support(this->thread_id_address && GetThreadIDArg),
thread_id(this->thread_id_address) {}
explicit InitByteFutex(uint8_t* _init_byte_address, uint32_t* _thread_id_address)
: init_byte(_init_byte_address),
has_thread_id_support(_thread_id_address != nullptr && GetThreadIDArg != nullptr),
thread_id(_thread_id_address),
base_address(reinterpret_cast<int*>(/*_init_byte_address & ~0x3*/ _init_byte_address - 1)) {}
public:
AcquireResult acquire_init_byte() {
/// The init byte portion of cxa_guard_acquire. Returns true if
/// initialization has already been completed.
bool acquire() {
while (true) {
uint8_t last_val = UNSET;
if (init_byte.compare_exchange(&last_val, PENDING_BIT, std::_AO_Acq_Rel,
std::_AO_Acquire)) {
if (init_byte.compare_exchange(&last_val, PENDING_BIT, std::_AO_Acq_Rel, std::_AO_Acquire)) {
if (has_thread_id_support) {
thread_id.store(current_thread_id.get(), std::_AO_Relaxed);
}
return INIT_IS_PENDING;
return false;
}
if (last_val == COMPLETE_BIT)
return INIT_IS_DONE;
return true;
if (last_val & PENDING_BIT) {
// Check for recursive initialization
if (has_thread_id_support && thread_id.load(std::_AO_Relaxed) == current_thread_id.get()) {
ABORT_WITH_MESSAGE("__cxa_guard_acquire detected recursive initialization");
ABORT_WITH_MESSAGE("__cxa_guard_acquire detected recursive initialization");
}
if ((last_val & WAITING_BIT) == 0) {
@ -462,11 +464,10 @@ public:
// (1) another thread finished the whole thing before we got here
// (2) another thread set the waiting bit we were trying to set
// (3) another thread had an exception and failed to finish
if (!init_byte.compare_exchange(&last_val, PENDING_BIT | WAITING_BIT,
std::_AO_Acq_Rel, std::_AO_Release)) {
if (!init_byte.compare_exchange(&last_val, PENDING_BIT | WAITING_BIT, std::_AO_Acq_Rel, std::_AO_Release)) {
// (1) success, via someone else's work!
if (last_val == COMPLETE_BIT)
return INIT_IS_DONE;
return true;
// (3) someone else, bailed on doing the work, retry from the start!
if (last_val == UNSET)
@ -480,30 +481,30 @@ public:
}
}
void release_init_byte() {
/// The init byte portion of cxa_guard_release.
void release() {
uint8_t old = init_byte.exchange(COMPLETE_BIT, std::_AO_Acq_Rel);
if (old & WAITING_BIT)
wake_all();
}
void abort_init_byte() {
/// The init byte portion of cxa_guard_abort.
void abort() {
if (has_thread_id_support)
thread_id.store(0, std::_AO_Relaxed);
uint8_t old = init_byte.exchange(0, std::_AO_Acq_Rel);
uint8_t old = init_byte.exchange(UNSET, std::_AO_Acq_Rel);
if (old & WAITING_BIT)
wake_all();
}
private:
/// Use the futex to wait on the current guard variable. Futex expects a
/// 32-bit 4-byte aligned address as the first argument, so we have to use use
/// the base address of the guard variable (not the init byte).
void wait_on_initialization() {
Wait(static_cast<int*>(this->base_address),
expected_value_for_futex(PENDING_BIT | WAITING_BIT));
}
void wake_all() { Wake(static_cast<int*>(this->base_address)); }
/// 32-bit 4-byte aligned address as the first argument, so we use the 4-byte
/// aligned address that encompasses the init byte (i.e. the address of the
/// raw guard object that was passed to __cxa_guard_acquire/release/abort).
void wait_on_initialization() { Wait(base_address, expected_value_for_futex(PENDING_BIT | WAITING_BIT)); }
void wake_all() { Wake(base_address); }
private:
AtomicInt<uint8_t> init_byte;
@ -513,6 +514,10 @@ private:
AtomicInt<uint32_t> thread_id;
LazyValue<uint32_t, GetThreadIDArg> current_thread_id;
/// the 4-byte-aligned address that encompasses the init byte (i.e. the
/// address of the raw guard object).
int* const base_address;
/// Create the expected integer value for futex `wait(int* addr, int expected)`.
/// We pass the base address as the first argument, so this function creates
/// a zero-initialized integer with `b` copied at the correct offset.
@ -525,6 +530,86 @@ private:
static_assert(Wait != nullptr && Wake != nullptr, "");
};
//===----------------------------------------------------------------------===//
// GuardObject
//===----------------------------------------------------------------------===//
enum class AcquireResult {
INIT_IS_DONE,
INIT_IS_PENDING,
};
constexpr AcquireResult INIT_IS_DONE = AcquireResult::INIT_IS_DONE;
constexpr AcquireResult INIT_IS_PENDING = AcquireResult::INIT_IS_PENDING;
/// Co-ordinates between GuardByte and InitByte.
template <class InitByteT>
struct GuardObject {
GuardObject() = delete;
GuardObject(GuardObject const&) = delete;
GuardObject& operator=(GuardObject const&) = delete;
private:
GuardByte guard_byte;
InitByteT init_byte;
public:
/// ARM Constructor
explicit GuardObject(uint32_t* raw_guard_object)
: guard_byte(reinterpret_cast<uint8_t*>(raw_guard_object)),
init_byte(reinterpret_cast<uint8_t*>(raw_guard_object) + 1, nullptr) {}
/// Itanium Constructor
explicit GuardObject(uint64_t* raw_guard_object)
: guard_byte(reinterpret_cast<uint8_t*>(raw_guard_object)),
init_byte(reinterpret_cast<uint8_t*>(raw_guard_object) + 1, reinterpret_cast<uint32_t*>(raw_guard_object) + 1) {
}
/// Implements __cxa_guard_acquire.
AcquireResult cxa_guard_acquire() {
// Use short-circuit evaluation to avoid calling init_byte.acquire when
// guard_byte.acquire returns true. (i.e. don't call it when we know from
// the guard byte that initialization has already been completed)
if (guard_byte.acquire() || init_byte.acquire())
return INIT_IS_DONE;
return INIT_IS_PENDING;
}
/// Implements __cxa_guard_release.
void cxa_guard_release() {
// Update guard byte first, so if somebody is woken up by init_byte.release
// and comes all the way back around to __cxa_guard_acquire again, they see
// it as having completed initialization.
guard_byte.release();
init_byte.release();
}
/// Implements __cxa_guard_abort.
void cxa_guard_abort() {
guard_byte.abort();
init_byte.abort();
}
};
//===----------------------------------------------------------------------===//
// Convenience Classes
//===----------------------------------------------------------------------===//
/// NoThreadsGuard - Manages initialization without performing any inter-thread
/// synchronization.
using NoThreadsGuard = GuardObject<InitByteNoThreads>;
/// GlobalMutexGuard - Manages initialization using a global mutex and
/// condition variable.
template <class Mutex, class CondVar, Mutex& global_mutex, CondVar& global_cond,
uint32_t (*GetThreadID)() = PlatformThreadID>
using GlobalMutexGuard = GuardObject<InitByteGlobalMutex<Mutex, CondVar, global_mutex, global_cond, GetThreadID>>;
/// FutexGuard - Manages initialization using atomics and the futex syscall for
/// waiting and waking.
template <void (*Wait)(int*, int) = PlatformFutexWait, void (*Wake)(int*) = PlatformFutexWake,
uint32_t (*GetThreadIDArg)() = PlatformThreadID>
using FutexGuard = GuardObject<InitByteFutex<Wait, Wake, GetThreadIDArg>>;
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
@ -536,31 +621,25 @@ struct GlobalStatic {
template <class T>
_LIBCPP_SAFE_STATIC T GlobalStatic<T>::instance = {};
enum class Implementation {
NoThreads,
GlobalLock,
Futex
};
enum class Implementation { NoThreads, GlobalMutex, Futex };
template <Implementation Impl>
struct SelectImplementation;
template <>
struct SelectImplementation<Implementation::NoThreads> {
using type = InitByteNoThreads;
using type = NoThreadsGuard;
};
template <>
struct SelectImplementation<Implementation::GlobalLock> {
using type = InitByteGlobalMutex<
LibcppMutex, LibcppCondVar, GlobalStatic<LibcppMutex>::instance,
GlobalStatic<LibcppCondVar>::instance, PlatformThreadID>;
struct SelectImplementation<Implementation::GlobalMutex> {
using type = GlobalMutexGuard<LibcppMutex, LibcppCondVar, GlobalStatic<LibcppMutex>::instance,
GlobalStatic<LibcppCondVar>::instance, PlatformThreadID>;
};
template <>
struct SelectImplementation<Implementation::Futex> {
using type =
InitByteFutex<PlatformFutexWait, PlatformFutexWake, PlatformThreadID>;
using type = FutexGuard<PlatformFutexWait, PlatformFutexWake, PlatformThreadID>;
};
// TODO(EricWF): We should prefer the futex implementation when available. But
@ -571,22 +650,21 @@ constexpr Implementation CurrentImplementation =
#elif defined(_LIBCXXABI_USE_FUTEX)
Implementation::Futex;
#else
Implementation::GlobalLock;
Implementation::GlobalMutex;
#endif
static_assert(CurrentImplementation != Implementation::Futex
|| PlatformSupportsFutex(), "Futex selected but not supported");
static_assert(CurrentImplementation != Implementation::Futex || PlatformSupportsFutex(),
"Futex selected but not supported");
using SelectedImplementation =
SelectImplementation<CurrentImplementation>::type;
using SelectedImplementation = SelectImplementation<CurrentImplementation>::type;
} // end namespace
} // end namespace __cxxabiv1
#if defined(__clang__)
# pragma clang diagnostic pop
# pragma clang diagnostic pop
#elif defined(__GNUC__)
# pragma GCC diagnostic pop
# pragma GCC diagnostic pop
#endif
#endif // LIBCXXABI_SRC_INCLUDE_CXA_GUARD_IMPL_H
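For orientation, SelectedImplementation is consumed by src/cxa_guard.cpp (not shown in this diff); roughly like the following sketch, which assumes this header has been included with BUILDING_CXA_GUARD defined and uses the Itanium 64-bit guard_type (ARM uses uint32_t):

#include <cstdint>
using guard_type = uint64_t; // assumption: non-ARM guard ABI

namespace __cxxabiv1 {
extern "C" {

int __cxa_guard_acquire(guard_type *raw_guard_object) {
    SelectedImplementation imp(raw_guard_object);
    // Non-zero (INIT_IS_PENDING) tells the caller it must run the initializer.
    return static_cast<int>(imp.cxa_guard_acquire());
}

void __cxa_guard_release(guard_type *raw_guard_object) {
    SelectedImplementation imp(raw_guard_object);
    imp.cxa_guard_release();
}

void __cxa_guard_abort(guard_type *raw_guard_object) {
    SelectedImplementation imp(raw_guard_object);
    imp.cxa_guard_abort();
}

} // extern "C"
} // namespace __cxxabiv1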


@ -1,4 +1,4 @@
//===------------------------- cxa_handlers.cpp ---------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -6,7 +6,7 @@
//
//
// This file implements the functionality associated with the terminate_handler,
// unexpected_handler, and new_handler.
// unexpected_handler, and new_handler.
//===----------------------------------------------------------------------===//
#include <stdexcept>
@ -17,7 +17,7 @@
#include "cxa_handlers.h"
#include "cxa_exception.h"
#include "private_typeinfo.h"
#include "include/atomic_support.h"
#include "include/atomic_support.h" // from libc++
namespace std
{
@ -92,16 +92,6 @@ terminate() noexcept
__terminate(get_terminate());
}
extern "C" {
new_handler __cxa_new_handler = 0;
}
new_handler
set_new_handler(new_handler handler) noexcept
{
return __libcpp_atomic_exchange(&__cxa_new_handler, handler, _AO_Acq_Rel);
}
new_handler
get_new_handler() noexcept
{


@ -1,4 +1,4 @@
//===------------------------- cxa_handlers.h -----------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===------------------------- cxa_exception.cpp --------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===------------------------- cxa_exception.cpp --------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -1004,9 +1004,14 @@ extern "C" _Unwind_Reason_Code __gnu_unwind_frame(_Unwind_Exception*,
static _Unwind_Reason_Code continue_unwind(_Unwind_Exception* unwind_exception,
_Unwind_Context* context)
{
if (__gnu_unwind_frame(unwind_exception, context) != _URC_OK)
return _URC_FAILURE;
switch (__gnu_unwind_frame(unwind_exception, context)) {
case _URC_OK:
return _URC_CONTINUE_UNWIND;
case _URC_END_OF_STACK:
return _URC_END_OF_STACK;
default:
return _URC_FAILURE;
}
}
// ARM register names
@ -1109,7 +1114,14 @@ __gxx_personality_v0(_Unwind_State state,
// Either we didn't do a phase 1 search (due to forced unwinding), or
// phase 1 reported no catching-handlers.
// Search for a (non-catching) cleanup
scan_eh_tab(results, _UA_CLEANUP_PHASE, native_exception, unwind_exception, context);
if (is_force_unwinding)
scan_eh_tab(
results,
static_cast<_Unwind_Action>(_UA_CLEANUP_PHASE | _UA_FORCE_UNWIND),
native_exception, unwind_exception, context);
else
scan_eh_tab(results, _UA_CLEANUP_PHASE, native_exception,
unwind_exception, context);
if (results.reason == _URC_HANDLER_FOUND)
{
// Found a non-catching handler


@ -1,4 +1,4 @@
//===----------------------- cxa_thread_atexit.cpp ------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===-------------------------- cxa_vector.cpp ---------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===-------------------------- cxa_virtual.cpp ---------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.

File diff suppressed because it is too large.


@ -7,6 +7,9 @@
//===----------------------------------------------------------------------===//
//
// FIXME: Use std::string_view instead when we support C++17.
// There are two copies of this file in the source tree. The one under
// libcxxabi is the original and the one under llvm is the copy. Use
// cp-to-llvm.sh to update the copy. See README.txt for more details.
//
//===----------------------------------------------------------------------===//
@ -14,7 +17,6 @@
#define DEMANGLE_STRINGVIEW_H
#include "DemangleConfig.h"
#include <algorithm>
#include <cassert>
#include <cstring>
@ -38,15 +40,16 @@ public:
StringView substr(size_t Pos, size_t Len = npos) const {
assert(Pos <= size());
return StringView(begin() + Pos, std::min(Len, size() - Pos));
if (Len > size() - Pos)
Len = size() - Pos;
return StringView(begin() + Pos, Len);
}
size_t find(char C, size_t From = 0) const {
size_t FindBegin = std::min(From, size());
// Avoid calling memchr with nullptr.
if (FindBegin < size()) {
if (From < size()) {
// Just forward to memchr, which is faster than a hand-rolled loop.
if (const void *P = ::memchr(First + FindBegin, C, size() - FindBegin))
if (const void *P = ::memchr(First + From, C, size() - From))
return size_t(static_cast<const char *>(P) - First);
}
return npos;
@ -98,7 +101,7 @@ public:
bool startsWith(StringView Str) const {
if (Str.size() > size())
return false;
return std::equal(Str.begin(), Str.end(), begin());
return std::strncmp(Str.begin(), begin(), Str.size()) == 0;
}
const char &operator[](size_t Idx) const { return *(begin() + Idx); }
@ -111,7 +114,7 @@ public:
inline bool operator==(const StringView &LHS, const StringView &RHS) {
return LHS.size() == RHS.size() &&
std::equal(LHS.begin(), LHS.end(), RHS.begin());
std::strncmp(LHS.begin(), RHS.begin(), LHS.size()) == 0;
}
DEMANGLE_NAMESPACE_END


@ -6,7 +6,10 @@
//
//===----------------------------------------------------------------------===//
//
// Provide some utility classes for use in the demangler(s).
// Provide some utility classes for use in the demangler.
// There are two copies of this file in the source tree. The one in libcxxabi
// is the original and the one in llvm is the copy. Use cp-to-llvm.sh to update
// the copy. See README.txt for more details.
//
//===----------------------------------------------------------------------===//
@ -14,17 +17,18 @@
#define DEMANGLE_UTILITY_H
#include "StringView.h"
#include <array>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <exception>
#include <limits>
DEMANGLE_NAMESPACE_BEGIN
// Stream that AST nodes write their string representation into after the AST
// has been parsed.
class OutputStream {
class OutputBuffer {
char *Buffer = nullptr;
size_t CurrentPosition = 0;
size_t BufferCapacity = 0;
@ -48,8 +52,8 @@ class OutputStream {
return;
}
char Temp[21];
char *TempPtr = std::end(Temp);
std::array<char, 21> Temp;
char *TempPtr = Temp.data() + Temp.size();
while (N) {
*--TempPtr = char('0' + N % 10);
@ -59,13 +63,13 @@ class OutputStream {
// Add negative sign...
if (isNeg)
*--TempPtr = '-';
this->operator<<(StringView(TempPtr, std::end(Temp)));
this->operator<<(StringView(TempPtr, Temp.data() + Temp.size()));
}
public:
OutputStream(char *StartBuf, size_t Size)
OutputBuffer(char *StartBuf, size_t Size)
: Buffer(StartBuf), CurrentPosition(0), BufferCapacity(Size) {}
OutputStream() = default;
OutputBuffer() = default;
void reset(char *Buffer_, size_t BufferCapacity_) {
CurrentPosition = 0;
Buffer = Buffer_;
@ -77,7 +81,7 @@ public:
unsigned CurrentPackIndex = std::numeric_limits<unsigned>::max();
unsigned CurrentPackMax = std::numeric_limits<unsigned>::max();
OutputStream &operator+=(StringView R) {
OutputBuffer &operator+=(StringView R) {
size_t Size = R.size();
if (Size == 0)
return *this;
@ -87,17 +91,28 @@ public:
return *this;
}
OutputStream &operator+=(char C) {
OutputBuffer &operator+=(char C) {
grow(1);
Buffer[CurrentPosition++] = C;
return *this;
}
OutputStream &operator<<(StringView R) { return (*this += R); }
OutputBuffer &operator<<(StringView R) { return (*this += R); }
OutputStream &operator<<(char C) { return (*this += C); }
OutputBuffer prepend(StringView R) {
size_t Size = R.size();
OutputStream &operator<<(long long N) {
grow(Size);
std::memmove(Buffer + Size, Buffer, CurrentPosition);
std::memcpy(Buffer, R.begin(), Size);
CurrentPosition += Size;
return *this;
}
OutputBuffer &operator<<(char C) { return (*this += C); }
OutputBuffer &operator<<(long long N) {
if (N < 0)
writeUnsigned(static_cast<unsigned long long>(-N), true);
else
@ -105,27 +120,37 @@ public:
return *this;
}
OutputStream &operator<<(unsigned long long N) {
OutputBuffer &operator<<(unsigned long long N) {
writeUnsigned(N, false);
return *this;
}
OutputStream &operator<<(long N) {
OutputBuffer &operator<<(long N) {
return this->operator<<(static_cast<long long>(N));
}
OutputStream &operator<<(unsigned long N) {
OutputBuffer &operator<<(unsigned long N) {
return this->operator<<(static_cast<unsigned long long>(N));
}
OutputStream &operator<<(int N) {
OutputBuffer &operator<<(int N) {
return this->operator<<(static_cast<long long>(N));
}
OutputStream &operator<<(unsigned int N) {
OutputBuffer &operator<<(unsigned int N) {
return this->operator<<(static_cast<unsigned long long>(N));
}
void insert(size_t Pos, const char *S, size_t N) {
assert(Pos <= CurrentPosition);
if (N == 0)
return;
grow(N);
std::memmove(Buffer + Pos + N, Buffer + Pos, CurrentPosition - Pos);
std::memcpy(Buffer + Pos, S, N);
CurrentPosition += N;
}
size_t getCurrentPosition() const { return CurrentPosition; }
void setCurrentPosition(size_t NewPos) { CurrentPosition = NewPos; }
@ -171,7 +196,7 @@ public:
SwapAndRestore &operator=(const SwapAndRestore &) = delete;
};
inline bool initializeOutputStream(char *Buf, size_t *N, OutputStream &S,
inline bool initializeOutputBuffer(char *Buf, size_t *N, OutputBuffer &OB,
size_t InitSize) {
size_t BufferSize;
if (Buf == nullptr) {
@ -182,7 +207,7 @@ inline bool initializeOutputStream(char *Buf, size_t *N, OutputStream &S,
} else
BufferSize = *N;
S.reset(Buf, BufferSize);
OB.reset(Buf, BufferSize);
return true;
}


@ -1,4 +1,4 @@
//===------------------------ fallback_malloc.cpp -------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===------------------------- fallback_malloc.h --------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,180 +0,0 @@
//===----------------------------------------------------------------------===////
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===////
// FIXME: This file is copied from libcxx/src/include/atomic_support.h. Instead
// of duplicating the file in libc++abi we should require that the libc++
// sources are available when building libc++abi.
#ifndef ATOMIC_SUPPORT_H
#define ATOMIC_SUPPORT_H
#include "__config"
#include "memory" // for __libcpp_relaxed_load
#if defined(__clang__) && __has_builtin(__atomic_load_n) \
&& __has_builtin(__atomic_store_n) \
&& __has_builtin(__atomic_add_fetch) \
&& __has_builtin(__atomic_exchange_n) \
&& __has_builtin(__atomic_compare_exchange_n) \
&& defined(__ATOMIC_RELAXED) \
&& defined(__ATOMIC_CONSUME) \
&& defined(__ATOMIC_ACQUIRE) \
&& defined(__ATOMIC_RELEASE) \
&& defined(__ATOMIC_ACQ_REL) \
&& defined(__ATOMIC_SEQ_CST)
# define _LIBCXXABI_HAS_ATOMIC_BUILTINS
#elif !defined(__clang__) && defined(_GNUC_VER) && _GNUC_VER >= 407
# define _LIBCXXABI_HAS_ATOMIC_BUILTINS
#endif
#if !defined(_LIBCXXABI_HAS_ATOMIC_BUILTINS) && !defined(_LIBCXXABI_HAS_NO_THREADS)
# if defined(_LIBCPP_WARNING)
_LIBCPP_WARNING("Building libc++ without __atomic builtins is unsupported")
# else
# warning Building libc++ without __atomic builtins is unsupported
# endif
#endif
_LIBCPP_BEGIN_NAMESPACE_STD
namespace {
#if defined(_LIBCXXABI_HAS_ATOMIC_BUILTINS) && !defined(_LIBCXXABI_HAS_NO_THREADS)
enum __libcpp_atomic_order {
_AO_Relaxed = __ATOMIC_RELAXED,
_AO_Consume = __ATOMIC_CONSUME,
_AO_Acquire = __ATOMIC_ACQUIRE,
_AO_Release = __ATOMIC_RELEASE,
_AO_Acq_Rel = __ATOMIC_ACQ_REL,
_AO_Seq = __ATOMIC_SEQ_CST
};
template <class _ValueType, class _FromType>
inline _LIBCPP_INLINE_VISIBILITY
void __libcpp_atomic_store(_ValueType* __dest, _FromType __val,
int __order = _AO_Seq)
{
__atomic_store_n(__dest, __val, __order);
}
template <class _ValueType, class _FromType>
inline _LIBCPP_INLINE_VISIBILITY
void __libcpp_relaxed_store(_ValueType* __dest, _FromType __val)
{
__atomic_store_n(__dest, __val, _AO_Relaxed);
}
template <class _ValueType>
inline _LIBCPP_INLINE_VISIBILITY
_ValueType __libcpp_atomic_load(_ValueType const* __val,
int __order = _AO_Seq)
{
return __atomic_load_n(__val, __order);
}
template <class _ValueType, class _AddType>
inline _LIBCPP_INLINE_VISIBILITY
_ValueType __libcpp_atomic_add(_ValueType* __val, _AddType __a,
int __order = _AO_Seq)
{
return __atomic_add_fetch(__val, __a, __order);
}
template <class _ValueType>
inline _LIBCPP_INLINE_VISIBILITY
_ValueType __libcpp_atomic_exchange(_ValueType* __target,
_ValueType __value, int __order = _AO_Seq)
{
return __atomic_exchange_n(__target, __value, __order);
}
template <class _ValueType>
inline _LIBCPP_INLINE_VISIBILITY
bool __libcpp_atomic_compare_exchange(_ValueType* __val,
_ValueType* __expected, _ValueType __after,
int __success_order = _AO_Seq,
int __fail_order = _AO_Seq)
{
return __atomic_compare_exchange_n(__val, __expected, __after, true,
__success_order, __fail_order);
}
#else // _LIBCPP_HAS_NO_THREADS
enum __libcpp_atomic_order {
_AO_Relaxed,
_AO_Consume,
_AO_Acquire,
_AO_Release,
_AO_Acq_Rel,
_AO_Seq
};
template <class _ValueType, class _FromType>
inline _LIBCPP_INLINE_VISIBILITY
void __libcpp_atomic_store(_ValueType* __dest, _FromType __val,
int = 0)
{
*__dest = __val;
}
template <class _ValueType, class _FromType>
inline _LIBCPP_INLINE_VISIBILITY
void __libcpp_relaxed_store(_ValueType* __dest, _FromType __val)
{
*__dest = __val;
}
template <class _ValueType>
inline _LIBCPP_INLINE_VISIBILITY
_ValueType __libcpp_atomic_load(_ValueType const* __val,
int = 0)
{
return *__val;
}
template <class _ValueType, class _AddType>
inline _LIBCPP_INLINE_VISIBILITY
_ValueType __libcpp_atomic_add(_ValueType* __val, _AddType __a,
int = 0)
{
return *__val += __a;
}
template <class _ValueType>
inline _LIBCPP_INLINE_VISIBILITY
_ValueType __libcpp_atomic_exchange(_ValueType* __target,
_ValueType __value, int = _AO_Seq)
{
_ValueType old = *__target;
*__target = __value;
return old;
}
template <class _ValueType>
inline _LIBCPP_INLINE_VISIBILITY
bool __libcpp_atomic_compare_exchange(_ValueType* __val,
_ValueType* __expected, _ValueType __after,
int = 0, int = 0)
{
if (*__val == *__expected) {
*__val = __after;
return true;
}
*__expected = *__val;
return false;
}
#endif // _LIBCPP_HAS_NO_THREADS
} // end namespace
_LIBCPP_END_NAMESPACE_STD
#endif // ATOMIC_SUPPORT_H


@ -1,4 +1,4 @@
//===----------------------- private_typeinfo.cpp -------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===------------------------ private_typeinfo.h --------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===---------------------------- exception.cpp ---------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===--------------------- stdlib_new_delete.cpp --------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.


@ -1,4 +1,4 @@
//===------------------------ stdexcept.cpp -------------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -12,9 +12,7 @@
#include <cstring>
#include <cstdint>
#include <cstddef>
// This includes an implementation file from libc++.
#include "../../libcxx/src/include/refstring.h"
#include "include/refstring.h" // from libc++
static_assert(sizeof(std::__libcpp_refstring) == sizeof(const char *), "");


@ -1,4 +1,4 @@
//===----------------------------- typeinfo.cpp ---------------------------===//
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.