[tor-commits] [tor-browser-build/master] Bug 33833: Upgrade Rust to use Android NDK 20



commit f2334e1dcb1714a2838b7c61569337f4e425fad5
Author: Georg Koppen <gk@xxxxxxxxxxxxxx>
Date:   Thu Jun 11 09:57:33 2020 +0000

    Bug 33833: Upgrade Rust to use Android NDK 20
    
    Bump the Rust version to 1.43.0 as well, which is the version Mozilla
    now uses.
    
    We add a new project so as not to interfere with the non-mobile
    toolchain requirements for now, until both series are moved to the
    non-ESR release train.
---
 projects/fenix-rust/43909.patch  | 252 +++++++++++++++++++++++++++++++++++++++
 projects/fenix-rust/build        |  80 +++++++++++++
 projects/fenix-rust/config       | 105 ++++++++++++++++
 projects/fenix-rust/unwind.patch | 162 +++++++++++++++++++++++++
 4 files changed, 599 insertions(+)

diff --git a/projects/fenix-rust/43909.patch b/projects/fenix-rust/43909.patch
new file mode 100644
index 0000000..78d2a75
--- /dev/null
+++ b/projects/fenix-rust/43909.patch
@@ -0,0 +1,252 @@
+From c95310f2d4fd3c88241c3b5d6dbf6251d34a3256 Mon Sep 17 00:00:00 2001
+From: Nikita Popov <nikita.ppv@xxxxxxxxx>
+Date: Sat, 16 Nov 2019 16:22:18 +0100
+Subject: [PATCH] Restructure caching
+
+Variant on D70103. The caching is switched to always use a BB to
+cache entry map, which then contains per-value caches. A separate
+set contains value handles with a deletion callback. This allows us
+to properly invalidate overdefined values.
+
+A possible alternative would be to always cache by value first and
+have per-BB maps/sets in each cache entry. In that case we could
+use a ValueMap and would avoid the separate value handle set. I went
+with the BB indexing at the top level to make it easier to integrate
+D69914, but possibly that's not the right choice.
+
+Differential Revision: https://reviews.llvm.org/D70376
+
+diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
+index 110c085d3f3..aa6862cb588 100644
+--- a/llvm/lib/Analysis/LazyValueInfo.cpp
++++ b/llvm/lib/Analysis/LazyValueInfo.cpp
+@@ -133,12 +133,9 @@ namespace {
+   /// A callback value handle updates the cache when values are erased.
+   class LazyValueInfoCache;
+   struct LVIValueHandle final : public CallbackVH {
+-    // Needs to access getValPtr(), which is protected.
+-    friend struct DenseMapInfo<LVIValueHandle>;
+-
+     LazyValueInfoCache *Parent;
+ 
+-    LVIValueHandle(Value *V, LazyValueInfoCache *P)
++    LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
+       : CallbackVH(V), Parent(P) { }
+ 
+     void deleted() override;
+@@ -152,89 +149,63 @@ namespace {
+   /// This is the cache kept by LazyValueInfo which
+   /// maintains information about queries across the clients' queries.
+   class LazyValueInfoCache {
+-    /// This is all of the cached block information for exactly one Value*.
+-    /// The entries are sorted by the BasicBlock* of the
+-    /// entries, allowing us to do a lookup with a binary search.
+-    /// Over-defined lattice values are recorded in OverDefinedCache to reduce
+-    /// memory overhead.
+-    struct ValueCacheEntryTy {
+-      ValueCacheEntryTy(Value *V, LazyValueInfoCache *P) : Handle(V, P) {}
+-      LVIValueHandle Handle;
+-      SmallDenseMap<PoisoningVH<BasicBlock>, ValueLatticeElement, 4> BlockVals;
++    /// This is all of the cached information for one basic block. It contains
++    /// the per-value lattice elements, as well as a separate set for
++    /// overdefined values to reduce memory usage.
++    struct BlockCacheEntryTy {
++      SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
++      SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
+     };
+ 
+-    /// This tracks, on a per-block basis, the set of values that are
+-    /// over-defined at the end of that block.
+-    typedef DenseMap<PoisoningVH<BasicBlock>, SmallPtrSet<Value *, 4>>
+-        OverDefinedCacheTy;
+-    /// Keep track of all blocks that we have ever seen, so we
+-    /// don't spend time removing unused blocks from our caches.
+-    DenseSet<PoisoningVH<BasicBlock> > SeenBlocks;
+-
+-    /// This is all of the cached information for all values,
+-    /// mapped from Value* to key information.
+-    DenseMap<Value *, std::unique_ptr<ValueCacheEntryTy>> ValueCache;
+-    OverDefinedCacheTy OverDefinedCache;
+-
++    /// Cached information per basic block.
++    DenseMap<PoisoningVH<BasicBlock>, BlockCacheEntryTy> BlockCache;
++    /// Set of value handles used to erase values from the cache on deletion.
++    DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
+ 
+   public:
+     void insertResult(Value *Val, BasicBlock *BB,
+                       const ValueLatticeElement &Result) {
+-      SeenBlocks.insert(BB);
+-
++      auto &CacheEntry = BlockCache.try_emplace(BB).first->second;
+       // Insert over-defined values into their own cache to reduce memory
+       // overhead.
+       if (Result.isOverdefined())
+-        OverDefinedCache[BB].insert(Val);
+-      else {
+-        auto It = ValueCache.find_as(Val);
+-        if (It == ValueCache.end()) {
+-          ValueCache[Val] = make_unique<ValueCacheEntryTy>(Val, this);
+-          It = ValueCache.find_as(Val);
+-          assert(It != ValueCache.end() && "Val was just added to the map!");
+-        }
+-        It->second->BlockVals[BB] = Result;
+-      }
+-    }
+-
+-    bool isOverdefined(Value *V, BasicBlock *BB) const {
+-      auto ODI = OverDefinedCache.find(BB);
+-
+-      if (ODI == OverDefinedCache.end())
+-        return false;
++        CacheEntry.OverDefined.insert(Val);
++      else
++        CacheEntry.LatticeElements.insert({ Val, Result });
+ 
+-      return ODI->second.count(V);
++      auto HandleIt = ValueHandles.find_as(Val);
++      if (HandleIt == ValueHandles.end())
++        ValueHandles.insert({ Val, this });
+     }
+ 
+     bool hasCachedValueInfo(Value *V, BasicBlock *BB) const {
+-      if (isOverdefined(V, BB))
+-        return true;
+-
+-      auto I = ValueCache.find_as(V);
+-      if (I == ValueCache.end())
++      auto It = BlockCache.find(BB);
++      if (It == BlockCache.end())
+         return false;
+ 
+-      return I->second->BlockVals.count(BB);
++      return It->second.OverDefined.count(V) ||
++             It->second.LatticeElements.count(V);
+     }
+ 
+     ValueLatticeElement getCachedValueInfo(Value *V, BasicBlock *BB) const {
+-      if (isOverdefined(V, BB))
++      auto It = BlockCache.find(BB);
++      if (It == BlockCache.end())
++        return ValueLatticeElement();
++
++      if (It->second.OverDefined.count(V))
+         return ValueLatticeElement::getOverdefined();
+ 
+-      auto I = ValueCache.find_as(V);
+-      if (I == ValueCache.end())
++      auto LatticeIt = It->second.LatticeElements.find(V);
++      if (LatticeIt == It->second.LatticeElements.end())
+         return ValueLatticeElement();
+-      auto BBI = I->second->BlockVals.find(BB);
+-      if (BBI == I->second->BlockVals.end())
+-        return ValueLatticeElement();
+-      return BBI->second;
++
++      return LatticeIt->second;
+     }
+ 
+     /// clear - Empty the cache.
+     void clear() {
+-      SeenBlocks.clear();
+-      ValueCache.clear();
+-      OverDefinedCache.clear();
++      BlockCache.clear();
++      ValueHandles.clear();
+     }
+ 
+     /// Inform the cache that a given value has been deleted.
+@@ -248,23 +219,18 @@ namespace {
+     /// OldSucc might have (unless also overdefined in NewSucc).  This just
+     /// flushes elements from the cache and does not add any.
+     void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
+-
+-    friend struct LVIValueHandle;
+   };
+ }
+ 
+ void LazyValueInfoCache::eraseValue(Value *V) {
+-  for (auto I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E;) {
+-    // Copy and increment the iterator immediately so we can erase behind
+-    // ourselves.
+-    auto Iter = I++;
+-    SmallPtrSetImpl<Value *> &ValueSet = Iter->second;
+-    ValueSet.erase(V);
+-    if (ValueSet.empty())
+-      OverDefinedCache.erase(Iter);
++  for (auto &Pair : BlockCache) {
++    Pair.second.LatticeElements.erase(V);
++    Pair.second.OverDefined.erase(V);
+   }
+ 
+-  ValueCache.erase(V);
++  auto HandleIt = ValueHandles.find_as(V);
++  if (HandleIt != ValueHandles.end())
++    ValueHandles.erase(HandleIt);
+ }
+ 
+ void LVIValueHandle::deleted() {
+@@ -274,18 +240,7 @@ void LVIValueHandle::deleted() {
+ }
+ 
+ void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
+-  // Shortcut if we have never seen this block.
+-  DenseSet<PoisoningVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
+-  if (I == SeenBlocks.end())
+-    return;
+-  SeenBlocks.erase(I);
+-
+-  auto ODI = OverDefinedCache.find(BB);
+-  if (ODI != OverDefinedCache.end())
+-    OverDefinedCache.erase(ODI);
+-
+-  for (auto &I : ValueCache)
+-    I.second->BlockVals.erase(BB);
++  BlockCache.erase(BB);
+ }
+ 
+ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+@@ -303,10 +258,11 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+   std::vector<BasicBlock*> worklist;
+   worklist.push_back(OldSucc);
+ 
+-  auto I = OverDefinedCache.find(OldSucc);
+-  if (I == OverDefinedCache.end())
++  auto I = BlockCache.find(OldSucc);
++  if (I == BlockCache.end() || I->second.OverDefined.empty())
+     return; // Nothing to process here.
+-  SmallVector<Value *, 4> ValsToClear(I->second.begin(), I->second.end());
++  SmallVector<Value *, 4> ValsToClear(I->second.OverDefined.begin(),
++                                      I->second.OverDefined.end());
+ 
+   // Use a worklist to perform a depth-first search of OldSucc's successors.
+   // NOTE: We do not need a visited list since any blocks we have already
+@@ -320,10 +276,10 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+     if (ToUpdate == NewSucc) continue;
+ 
+     // If a value was marked overdefined in OldSucc, and is here too...
+-    auto OI = OverDefinedCache.find(ToUpdate);
+-    if (OI == OverDefinedCache.end())
++    auto OI = BlockCache.find(ToUpdate);
++    if (OI == BlockCache.end() || OI->second.OverDefined.empty())
+       continue;
+-    SmallPtrSetImpl<Value *> &ValueSet = OI->second;
++    auto &ValueSet = OI->second.OverDefined;
+ 
+     bool changed = false;
+     for (Value *V : ValsToClear) {
+@@ -333,11 +289,6 @@ void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
+       // If we removed anything, then we potentially need to update
+       // blocks successors too.
+       changed = true;
+-
+-      if (ValueSet.empty()) {
+-        OverDefinedCache.erase(OI);
+-        break;
+-      }
+     }
+ 
+     if (!changed) continue;
+-- 
+2.24.0
+
diff --git a/projects/fenix-rust/build b/projects/fenix-rust/build
new file mode 100644
index 0000000..a8f886b
--- /dev/null
+++ b/projects/fenix-rust/build
@@ -0,0 +1,80 @@
+#!/bin/bash
+[% c("var/set_default_env") -%]
+distdir=/var/tmp/dist/[% project %]
+mkdir -p $distdir
+tar -C /var/tmp/dist -xf [% c('input_files_by_name/cmake') %]
+export PATH="/var/tmp/dist/cmake/bin:$PATH"
+tar -C /var/tmp/dist -xf [% c('input_files_by_name/prev_rust') %]
+cd /var/tmp/dist/rust-[% c('var/prev_version') %]-x86_64-unknown-linux-gnu
+./install.sh --prefix=$distdir-rust-old
+export PATH="$distdir-rust-old/bin:$PATH"
+
+[% pc(c('var/compiler'), 'var/setup', { compiler_tarfile => c('input_files_by_name/' _ c('var/compiler')) }) %]
+
+[% IF c("var/osx") %]
+  # We need to clear `CC` and `LDFLAGS` as they are used for the host platform
+  # (i.e. Linux).
+  unset CC
+  unset LDFLAGS
+  # Target 10.9 as our toolchain does. Without this explicit declaration Bad
+  # Things will happen, as a lot of dependent code then assumes that the
+  # official macOS target, x86_64-apple-darwin, essentially means 10.4.
+  export MACOSX_DEPLOYMENT_TARGET=[% c("var/macosx_deployment_target") %]
+  # The Rust target for macOS is x86_64-apple-darwin, yet our toolchain is built
+  # for x86_64-apple-darwin11. We can't mix those targets as clang gets confused
+  # that way. Changing the Rust target to x86_64-apple-darwin11 would require a
+  # fair amount of patching, thus we create symlinks to provide Rust with the
+  # necessary tools while using our toolchain underneath, targeting 10.9.
+  cd $cctoolsdir
+  for f in `ls x86_64-apple-darwin11-*`; do
+    ln -s $f ${f//x86_64-apple-darwin11/x86_64-apple-darwin}
+  done
+  cd ..
+  ln -s x86_64-apple-darwin11 x86_64-apple-darwin
+  mkdir $distdir/helper
+
+  # We need to adapt our CFLAGS and make sure our flags are passed down to all
+  # dependencies. Using `CFLAGS_x86_64-apple-darwin` did not do the trick, so
+  # we resort to a wrapper script.
+  cat > $distdir/helper/x86_64-apple-darwin-clang << 'EOF'
+#!/bin/sh
+BASEDIR=/var/tmp/dist/macosx-toolchain
+$BASEDIR/cctools/bin/x86_64-apple-darwin-clang -target x86_64-apple-darwin -B $BASEDIR/cctools/bin -isysroot $BASEDIR/MacOSX10.11.sdk/ -Wl,-syslibroot,$BASEDIR/MacOSX10.11.sdk/ -Wl,-dead_strip -Wl,-pie "$@"
+EOF
+
+  chmod +x $distdir/helper/x86_64-apple-darwin-clang
+  export PATH=$distdir/helper:$PATH
+[% END %]
+
+cd $rootdir
+mkdir /var/tmp/build
+tar -C /var/tmp/build -xf [% c('input_files_by_name/rust') %]
+cd /var/tmp/build/rustc-[% c('version') %]-src
+
+# LLVM has reproducibility issues when optimizing bitcode, which we need to
+# patch. See: #32053 for more details.
+cd src/llvm-project
+patch -p1 < $rootdir/43909.patch
+cd ../../
+
+[% IF c("var/windows-i686") %]
+  # Cross-compiling for 32-bit Windows is currently not possible without
+  # patches: libstd expects DWARF unwinding, while most Linux toolchains
+  # targeting 32-bit Windows use SjLj unwinding.
+  # See https://github.com/rust-lang/rust/issues/12859 for discussion and
+  # https://github.com/rust-lang/rust/pull/49633 for a newer attempt to fix
+  # this problem. We apply the patch from neersighted.
+  patch -p1 < $rootdir/unwind.patch
+[% END %]
+
+mkdir build
+cd build
+../configure --prefix=$distdir [% c("var/configure_opt") %]
+
+make -j[% c("buildconf/num_procs") %]
+make install
+cd /var/tmp/dist
+[% c('tar', {
+        tar_src => [ project ],
+        tar_args => '-czf ' _ dest_dir _ '/' _ c('filename'),
+        }) %]
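
The SjLj-vs-DWARF question raised in the windows-i686 section of this build script is a property of the cross toolchain's libgcc. As a rough illustration (not part of this commit; the i686-w64-mingw32-gcc name and the probe itself are assumptions rather than anything the build runs), one can ask the cross GCC for its libgcc and look for the SjLj entry points:

#!/bin/sh
# Hypothetical probe, not part of this commit: check which unwinding
# scheme the MinGW cross toolchain's libgcc was built with. An SJLJ
# libgcc exports the _Unwind_SjLj_* entry points.
CROSS_CC="${CROSS_CC:-i686-w64-mingw32-gcc}"
libgcc="$($CROSS_CC -print-libgcc-file-name)"
if nm "$libgcc" | grep -q '_Unwind_SjLj_RaiseException'; then
  echo "SJLJ unwinding: libstd needs the sjlj_eh feature (unwind.patch)"
else
  echo "DWARF unwinding: a stock libstd should link"
fi
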
diff --git a/projects/fenix-rust/config b/projects/fenix-rust/config
new file mode 100644
index 0000000..9ad9908
--- /dev/null
+++ b/projects/fenix-rust/config
@@ -0,0 +1,105 @@
+# vim: filetype=yaml sw=2
+filename: '[% project %]-[% c("version") %]-[% c("var/build_id") %].tar.gz'
+version: '[% c("input_file_var/rust_version") %]'
+
+# These values can be overridden from the input_files section of other
+# projects. See projects/lucetc/config and bug 32436.
+input_file_var:
+  rust_version: 1.43.0
+  prev_version: 1.42.0
+
+var:
+  prev_version: '[% c("input_file_var/prev_version") %]'
+  container:
+    use_container: 1
+
+targets:
+  android:
+    var:
+      arch_deps:
+        - libssl-dev
+        - pkg-config
+        - zlib1g-dev
+      configure_opt: --enable-local-rust --enable-vendor --enable-extended --release-channel=stable --sysconfdir=etc --target=[% c("var/cross_prefix") %] --set=target.[% c("var/cross_prefix") %].cc=[% c("var/CC") %] --set=target.[% c("var/cross_prefix") %].ar=[% c("var/cross_prefix") %]-ar
+
+  android-armv7:
+    var:
+      configure_opt: --enable-local-rust --enable-vendor --enable-extended --release-channel=stable --sysconfdir=etc --target=thumbv7neon-linux-androideabi --set=target.thumbv7neon-linux-androideabi.cc=[% c("var/CC") %] --set=target.thumbv7neon-linux-androideabi.ar=[% c("var/cross_prefix") %]-ar
+
+  linux:
+    var:
+      deps:
+        - libc6-dev-i386
+        - lib32stdc++6
+        - build-essential
+        - python
+        - automake
+        - libssl-dev
+        - pkg-config
+        - hardening-wrapper
+      # We use
+      # `--enable-local-rust` to avoid downloading the required compiler during
+      # build time
+      #
+      # `--enable-vendor` to avoid downloading crates during build time and just
+      # use the ones which are shipped with the source
+      #
+      # `--enable-extended` to build not only rustc but cargo as well
+      #
+      # `--enable-llvm-static-stdcpp` to account for the libstdc++ on Wheezy,
+      # which is too old and, if used, gives undefined reference errors
+      #
+      # `--release-channel=stable` to just include stable features in the
+      # compiler
+      #
+      # `--sysconfdir=etc` to avoid install failures as |make install| wants to
+      # write to /etc otherwise
+      #
+      # `--target` to explicitly specify the architecture and platform for the
+      # compiler/std lib. Ideally, it should not be needed unless one is
+      # cross-compiling, but compiling `alloc_jemalloc` fails without it in a
+      # 32-bit container, as "--host=x86_64-unknown-linux-gnu" is used in its
+      # configure script in that case.
+      # `--set=` to explicitly specify the C compiler. We need to compile the
+      # bundled LLVM, and it wants to use `cc`. However, our compiled GCC does
+      # not provide `cc`, resulting in weird errors due to a C and C++ compiler
+      # version mismatch. We avoid that with this configure option. We need to
+      # build our own GCC in the first place, as 4.7.2 is too old to get all
+      # the Rust pieces compiled.
+      configure_opt: --enable-local-rust --enable-vendor --enable-extended --enable-llvm-static-stdcpp --release-channel=stable --sysconfdir=etc --target=x86_64-unknown-linux-gnu,i686-unknown-linux-gnu --set=target.x86_64-unknown-linux-gnu.cc=gcc --set=target.i686-unknown-linux-gnu.cc=gcc
+
+  osx-x86_64:
+    var:
+      arch_deps:
+        - libssl-dev
+        - pkg-config
+        - zlib1g-dev
+      configure_opt: --enable-local-rust --enable-vendor --enable-extended --release-channel=stable --sysconfdir=etc --target=x86_64-apple-darwin --set=target.x86_64-apple-darwin.cc=x86_64-apple-darwin-clang
+
+  windows:
+    var:
+      arch_deps:
+        - libssl-dev
+        - pkg-config
+        - zlib1g-dev
+      configure_opt: --enable-local-rust --enable-vendor --enable-extended --release-channel=stable --sysconfdir=etc --target=[% c("arch") %]-pc-windows-gnu
+
+input_files:
+  - project: container-image
+  - project: cmake
+    name: cmake
+  - project: '[% c("var/compiler") %]'
+    name: '[% c("var/compiler") %]'
+  - URL: 'https://static.rust-lang.org/dist/rustc-[% c("version") %]-src.tar.gz'
+    name: rust
+    sig_ext: asc
+    file_gpg_id: 1
+    gpg_keyring: rust.gpg
+  - URL: 'https://static.rust-lang.org/dist/rust-[% c("var/prev_version") %]-x86_64-unknown-linux-gnu.tar.xz'
+    name: prev_rust
+    sig_ext: asc
+    file_gpg_id: 1
+    gpg_keyring: rust.gpg
+  - filename: unwind.patch
+    enable: '[% c("var/windows-i686") %]'
+  - filename: 43909.patch
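
For illustration, combining the build script's `../configure --prefix=$distdir [% c("var/configure_opt") %]` line with the linux target's configure_opt above yields roughly the following invocation (a sketch; $distdir is set by the build script):

# Illustration only: the effective configure invocation for the linux target.
../configure --prefix=$distdir \
  --enable-local-rust --enable-vendor --enable-extended \
  --enable-llvm-static-stdcpp --release-channel=stable --sysconfdir=etc \
  --target=x86_64-unknown-linux-gnu,i686-unknown-linux-gnu \
  --set=target.x86_64-unknown-linux-gnu.cc=gcc \
  --set=target.i686-unknown-linux-gnu.cc=gcc
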
diff --git a/projects/fenix-rust/unwind.patch b/projects/fenix-rust/unwind.patch
new file mode 100644
index 0000000..7b22dcb
--- /dev/null
+++ b/projects/fenix-rust/unwind.patch
@@ -0,0 +1,162 @@
+From b3bea7008ece7a5bdf9b5a5dcc95e82febad1854 Mon Sep 17 00:00:00 2001
+From: Bjorn Neergaard <bjorn@xxxxxxxxxxxxxxx>
+Date: Sat, 9 Feb 2019 19:39:23 +0000
+Subject: [PATCH] Fix cross-compiling i686-pc-windows-gnu from Linux
+
+This is still very rough and serves as a proof-of-concept for fixing the
+Linux -> 32-bit MinGW cross-compilation workflow. Currently, clang's and
+GCC's MinGW targets both only support DW2 (DWARF) or SJLJ (setjmp/longjmp)
+unwinding on 32-bit Windows.
+
+The default for GCC (and the way it is shipped on every major distro) is
+to use SJLJ on Windows, as DWARF cannot traverse non-DWARF frames. This
+would work fine, except for the fact that libgcc (our C runtime on the
+MinGW platform) exports symbols under a different name when configured
+to use SJLJ-style unwinding, and uses a preprocessor macro internally to
+alias them.
+
+Because of this, we have to detect this scenario and link to the correct
+symbols ourselves. Linking has been tested with a full bootstrap on both
+x86_64-unknown-linux-gnu and i686-pc-windows-gnu, as well as
+cross-compilation of some of my own projects.
+
+Obviously, the detection is a bit unrefined. Right now we
+unconditionally use SJLJ when compiling Linux -> MinGW. I'd like to add
+feature detection using compiler build flags or autotools-style
+compilation and object analysis. Input on the best way to proceed here
+is welcome.
+
+Also, currently there is copy-pasted/duplicated code in libunwind.
+Ideally, this could be reduced, but this would likely require a
+rethinking of how iOS is special-cased above, to avoid further
+duplication. Input on how to best structure this file is requested.
+
+diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs
+index 249a183189..df08d6eb0c 100644
+--- a/src/bootstrap/compile.rs
++++ b/src/bootstrap/compile.rs
+@@ -162,7 +162,12 @@ pub fn std_cargo(builder: &Builder<'_>,
+             .arg("--features")
+             .arg("compiler-builtins-mem");
+     } else {
+-        let features = builder.std_features();
++        let mut features = builder.std_features();
++
++        // FIXME: Temporary detection of SJLJ MinGW compilers.
++        if builder.config.build.contains("linux") && target == "i686-pc-windows-gnu" {
++            features.push_str(" sjlj_eh");
++        }
+ 
+         if compiler.stage != 0 && builder.config.sanitizers {
+             // This variable is used by the sanitizer runtime crates, e.g.
+diff --git a/src/libstd/Cargo.toml b/src/libstd/Cargo.toml
+index 7d60a17042..d876d0b89a 100644
+--- a/src/libstd/Cargo.toml
++++ b/src/libstd/Cargo.toml
+@@ -71,3 +71,4 @@ wasm-bindgen-threads = []
+ # https://github.com/rust-lang-nursery/stdsimd/blob/master/crates/std_detect/Cargo.toml
+ std_detect_file_io = []
+ std_detect_dlsym_getauxval = []
++sjlj_eh = ["unwind/sjlj_eh"]
+diff --git a/src/libunwind/Cargo.toml b/src/libunwind/Cargo.toml
+index 2378b0a315..0b5979ed62 100644
+--- a/src/libunwind/Cargo.toml
++++ b/src/libunwind/Cargo.toml
+@@ -16,3 +16,6 @@ doc = false
+ core = { path = "../libcore" }
+ libc = { version = "0.2.43", features = ['rustc-dep-of-std'], default-features = false }
+ compiler_builtins = "0.1.0"
++
++[features]
++sjlj_eh = []
+diff --git a/src/libunwind/libunwind.rs b/src/libunwind/libunwind.rs
+index 339b554ed6..ec2f93ed60 100644
+--- a/src/libunwind/libunwind.rs
++++ b/src/libunwind/libunwind.rs
+@@ -1,10 +1,5 @@
+ #![allow(nonstandard_style)]
+ 
+-macro_rules! cfg_if {
+-    ( $( if #[cfg( $meta:meta )] { $($it1:item)* } else { $($it2:item)* } )* ) =>
+-        ( $( $( #[cfg($meta)] $it1)* $( #[cfg(not($meta))] $it2)* )* )
+-}
+-
+ use libc::{c_int, c_void, uintptr_t};
+ 
+ #[repr(C)]
+@@ -73,8 +68,8 @@ pub enum _Unwind_Context {}
+ pub type _Unwind_Exception_Cleanup_Fn = extern "C" fn(unwind_code: _Unwind_Reason_Code,
+                                                       exception: *mut _Unwind_Exception);
+ extern "C" {
+-    #[unwind(allowed)]
+-    pub fn _Unwind_Resume(exception: *mut _Unwind_Exception) -> !;
++    #[cfg_attr(stage0, unwind)]
++    #[cfg_attr(not(stage0), unwind(allowed))]
+     pub fn _Unwind_DeleteException(exception: *mut _Unwind_Exception);
+     pub fn _Unwind_GetLanguageSpecificData(ctx: *mut _Unwind_Context) -> *mut c_void;
+     pub fn _Unwind_GetRegionStart(ctx: *mut _Unwind_Context) -> _Unwind_Ptr;
+@@ -206,26 +201,52 @@ if #[cfg(all(any(target_os = "ios", target_os = "netbsd", not(target_arch = "arm
+         pc
+     }
+ }
++} // cfg_if!
+ 
+-if #[cfg(not(all(target_os = "ios", target_arch = "arm")))] {
+-    // Not 32-bit iOS
++cfg_if! {
++if #[cfg(all(target_os = "ios", target_arch = "arm"))] {
++    // 32-bit iOS uses SjLj and does not provide _Unwind_Backtrace()
+     extern "C" {
+-        #[unwind(allowed)]
+-        pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
++        #[cfg_attr(stage0, unwind)]
++        #[cfg_attr(not(stage0), unwind(allowed))]
++        pub fn _Unwind_Resume(exception: *mut _Unwind_Exception) -> !;
++        pub fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
++    }
++
++    #[inline]
++    pub unsafe fn _Unwind_RaiseException(exc: *mut _Unwind_Exception) -> _Unwind_Reason_Code {
++        _Unwind_SjLj_RaiseException(exc)
++    }
++
++} else if #[cfg(feature = "sjlj_eh")] {
++    extern "C" {
++        #[cfg_attr(stage0, unwind)]
++        #[cfg_attr(not(stage0), unwind(allowed))]
++        pub fn _Unwind_SjLj_Resume(e: *mut _Unwind_Exception) -> !;
++        pub fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
+         pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
+                                  trace_argument: *mut c_void)
+                                  -> _Unwind_Reason_Code;
+     }
+-} else {
+-    // 32-bit iOS uses SjLj and does not provide _Unwind_Backtrace()
+-    extern "C" {
+-        #[unwind(allowed)]
+-        pub fn _Unwind_SjLj_RaiseException(e: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
++
++    #[inline]
++    pub unsafe fn _Unwind_Resume(exc: *mut _Unwind_Exception) -> ! {
++        _Unwind_SjLj_Resume(exc)
+     }
+ 
+     #[inline]
+     pub unsafe fn _Unwind_RaiseException(exc: *mut _Unwind_Exception) -> _Unwind_Reason_Code {
+         _Unwind_SjLj_RaiseException(exc)
+     }
++} else {
++    extern "C" {
++        #[cfg_attr(stage0, unwind)]
++        #[cfg_attr(not(stage0), unwind(allowed))]
++        pub fn _Unwind_Resume(exception: *mut _Unwind_Exception) -> !;
++        pub fn _Unwind_RaiseException(exception: *mut _Unwind_Exception) -> _Unwind_Reason_Code;
++        pub fn _Unwind_Backtrace(trace: _Unwind_Trace_Fn,
++                                 trace_argument: *mut c_void)
++                                 -> _Unwind_Reason_Code;
++    }
+ }
+ } // cfg_if!
+-- 
+2.23.0.rc0
+
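
The patch author asks for input on proper SJLJ feature detection; in the meantime, a quick way to confirm that the sjlj_eh feature took effect in a finished build is to look for the SjLj symbols in the installed cross libstd. A sketch under assumptions (the rustlib path follows the standard Rust install layout, and nm is run on the rlib, which is an ar archive):

# Hypothetical post-build check: an SJLJ-enabled libstd references the
# _Unwind_SjLj_* entry points instead of _Unwind_RaiseException and
# _Unwind_Resume.
rlib=$(ls "$distdir"/lib/rustlib/i686-pc-windows-gnu/lib/libstd-*.rlib)
nm "$rlib" 2>/dev/null | grep -oE '_Unwind_(SjLj_)?(RaiseException|Resume)' | sort -u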


