Index: mail/thunderbird/Makefile
===================================================================
--- mail/thunderbird/Makefile	(revision 459159)
+++ mail/thunderbird/Makefile	(working copy)
@@ -63,6 +63,12 @@
 MOZ_OPTIONS+=	--disable-calendar
 .endif
 
+.if ${ARCH} == armv6 || ${ARCH} == armv7
+BUILD_DEPENDS+=	${LOCALBASE}/bin/as:devel/binutils
+CONFIGURE_ENV+=	COMPILER_PATH=${LOCALBASE}/bin
+MAKE_ENV+=	COMPILER_PATH=${LOCALBASE}/bin
+.endif
+
 post-extract:
 	@${SED} -e 's|@PORTNAME_ICON@|${PORTNAME_ICON}|;s|@MOZILLA@|${MOZILLA}|' \
 		<${FILESDIR}/thunderbird.desktop.in >${WRKDIR}/${MOZILLA_EXEC_NAME}.desktop
Index: mail/thunderbird/files/patch-mozilla_ipc_chromium_src_base__atomicops__internals__arm__gcc.h
===================================================================
--- mail/thunderbird/files/patch-mozilla_ipc_chromium_src_base__atomicops__internals__arm__gcc.h	(nonexistent)
+++ mail/thunderbird/files/patch-mozilla_ipc_chromium_src_base__atomicops__internals__arm__gcc.h	(working copy)
@@ -0,0 +1,265 @@
+--- mozilla/ipc/chromium/src/base/atomicops_internals_arm_gcc.h.orig	2017-10-31 10:44:20 UTC
++++ mozilla/ipc/chromium/src/base/atomicops_internals_arm_gcc.h
+@@ -14,6 +14,13 @@
+ namespace base {
+ namespace subtle {
+ 
++#if defined(__FreeBSD__)
++#include <sys/types.h>
++#include <machine/atomic.h>
++#endif
++
++inline void MemoryBarrier() {
++#if defined(__linux__) || defined(__ANDROID__)
+ // 0xffff0fc0 is the hard coded address of a function provided by
+ // the kernel which implements an atomic compare-exchange. On older
+ // ARM architecture revisions (pre-v6) this may be implemented using
+@@ -28,29 +35,161 @@ LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute
+ typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+ LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+     (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
++#elif defined(__FreeBSD__)
++  dsb();
++#else
++#error MemoryBarrier() is not implemented on this platform.
++#endif
++}
+ 
++// An ARM toolchain would only define one of these depending on which
++// variant of the target architecture is being used. This tests against
++// any known ARMv6 or ARMv7 variant, where it is possible to directly
++// use ldrex/strex instructions to implement fast atomic operations.
++#if defined(__ARM_ARCH_8A__) || \
++    defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
++    defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
++    defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
++    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
++    defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || \
++    defined(__ARM_ARCH_6KZ__)
+ 
+ 
+ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                          Atomic32 old_value,
+                                          Atomic32 new_value) {
+-  Atomic32 prev_value = *ptr;
++  Atomic32 prev_value;
++  int reloop;
+   do {
+-    if (!pLinuxKernelCmpxchg(old_value, new_value,
+-                             const_cast<Atomic32*>(ptr))) {
+-      return old_value;
+-    }
+-    prev_value = *ptr;
+-  } while (prev_value == old_value);
++    // The following is equivalent to:
++    //
++    //   prev_value = LDREX(ptr)
++    //   reloop = 0
++    //   if (prev_value != old_value)
++    //     reloop = STREX(ptr, new_value)
++    __asm__ __volatile__("    ldrex %0, [%3]\n"
++                         "    mov %1, #0\n"
++                         "    cmp %0, %4\n"
++#ifdef __thumb2__
++                         "    it eq\n"
++#endif
++                         "    strexeq %1, %5, [%3]\n"
++                         : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
++                         : "r"(ptr), "r"(old_value), "r"(new_value)
++                         : "cc", "memory");
++  } while (reloop != 0);
+   return prev_value;
+ }
+ 
++inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  MemoryBarrier();
++  return result;
++}
++
++inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  MemoryBarrier();
++  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++}
++
++inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
++                                          Atomic32 increment) {
++  Atomic32 value;
++  int reloop;
++  do {
++    // Equivalent to:
++    //
++    //   value = LDREX(ptr)
++    //   value += increment
++    //   reloop = STREX(ptr, value)
++    //
++    __asm__ __volatile__("    ldrex %0, [%3]\n"
++                         "    add %0, %0, %4\n"
++                         "    strex %1, %0, [%3]\n"
++                         : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
++                         : "r"(ptr), "r"(increment)
++                         : "cc", "memory");
++  } while (reloop);
++  return value;
++}
++
++inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
++                                        Atomic32 increment) {
++  // TODO(digit): Investigate if it's possible to implement this with
++  // a single MemoryBarrier() operation between the LDREX and STREX.
++  // See http://crbug.com/246514
++  MemoryBarrier();
++  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
++  MemoryBarrier();
++  return result;
++}
++
+ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                          Atomic32 new_value) {
+   Atomic32 old_value;
++  int reloop;
+   do {
++    // old_value = LDREX(ptr)
++    // reloop = STREX(ptr, new_value)
++    __asm__ __volatile__("    ldrex %0, [%3]\n"
++                         "    strex %1, %4, [%3]\n"
++                         : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
++                         : "r"(ptr), "r"(new_value)
++                         : "cc", "memory");
++  } while (reloop != 0);
++  return old_value;
++}
++
++// This tests against any known ARMv5 variant.
++#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
++    defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
++
++// The kernel also provides a helper function to perform an atomic
++// compare-and-swap operation at the hard-wired address 0xffff0fc0.
++// On ARMv5, this is implemented by a special code path that the kernel
++// detects and treats specially when thread pre-emption happens.
++// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
++//
++// Note that this always performs a full memory barrier, there is no
++// need to add calls to MemoryBarrier() before or after it. It also
++// returns 0 on success, and 1 on failure.
++//
++// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
++// use newer kernel revisions, so this should not be a concern.
++namespace {
++
++inline int LinuxKernelCmpxchg(Atomic32 old_value,
++                              Atomic32 new_value,
++                              volatile Atomic32* ptr) {
++  typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
++  return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
++}
++
++}  // namespace
++
++inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
++                                         Atomic32 old_value,
++                                         Atomic32 new_value) {
++  Atomic32 prev_value;
++  for (;;) {
++    prev_value = *ptr;
++    if (prev_value != old_value)
++      return prev_value;
++    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
++      return old_value;
++  }
++}
++
++inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
++                                         Atomic32 new_value) {
++  Atomic32 old_value;
++  do {
+     old_value = *ptr;
+-  } while (pLinuxKernelCmpxchg(old_value, new_value,
+-                               const_cast<Atomic32*>(ptr)));
++  } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
+   return old_value;
+ }
+ 
+@@ -65,36 +204,57 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomi
+     // Atomic exchange the old value with an incremented one.
+     Atomic32 old_value = *ptr;
+     Atomic32 new_value = old_value + increment;
+-    if (pLinuxKernelCmpxchg(old_value, new_value,
+-                            const_cast<Atomic32*>(ptr)) == 0) {
++    if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
+       // The exchange took place as expected.
+       return new_value;
+     }
+     // Otherwise, *ptr changed mid-loop and we need to retry.
+   }
+-
+ }
+ 
+ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                        Atomic32 old_value,
+                                        Atomic32 new_value) {
+-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  Atomic32 prev_value;
++  for (;;) {
++    prev_value = *ptr;
++    if (prev_value != old_value) {
++      // Always ensure acquire semantics.
++      MemoryBarrier();
++      return prev_value;
++    }
++    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
++      return old_value;
++  }
+ }
+ 
+ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                        Atomic32 old_value,
+                                        Atomic32 new_value) {
+-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  // This could be implemented as:
++  //    MemoryBarrier();
++  //    return NoBarrier_CompareAndSwap();
++  //
++  // But would use 3 barriers per successful CAS. To save performance,
++  // use Acquire_CompareAndSwap(). Its implementation guarantees that:
++  // - A successful swap uses only 2 barriers (in the kernel helper).
++  // - An early return due to (prev_value != old_value) performs
++  //   a memory barrier with no store, which is equivalent to the
++  //   generic implementation above.
++  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+ }
+ 
++#else
++#  error "Your CPU's ARM architecture is not supported yet"
++#endif
++
++// NOTE: Atomicity of the following load and store operations is only
++// guaranteed in case of 32-bit alignment of |ptr| values.
++
+ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+   *ptr = value;
+ }
+ 
+-inline void MemoryBarrier() {
+-  pLinuxKernelMemoryBarrier();
+-}
+-
+ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+   *ptr = value;
+   MemoryBarrier();
+@@ -105,9 +265,7 @@ inline void Release_Store(volatile Atomic32* ptr, Atom
+   *ptr = value;
+ }
+ 
+-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+-  return *ptr;
+-}
++inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+ 
+ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+   Atomic32 value = *ptr;
Property changes on: mail/thunderbird/files/patch-mozilla_ipc_chromium_src_base__atomicops__internals__arm__gcc.h
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: mail/thunderbird/files/patch-mozilla_media_libtheora_lib_arm_armcpu.c
===================================================================
--- mail/thunderbird/files/patch-mozilla_media_libtheora_lib_arm_armcpu.c	(nonexistent)
+++ mail/thunderbird/files/patch-mozilla_media_libtheora_lib_arm_armcpu.c	(working copy)
@@ -0,0 +1,14 @@
+--- mozilla/media/libtheora/lib/arm/armcpu.c.orig	2017-10-03 06:47:36 UTC
++++ mozilla/media/libtheora/lib/arm/armcpu.c
+@@ -107,6 +107,11 @@ ogg_uint32_t oc_cpu_flags_get(void){
+   return flags;
+ }
+ 
++#elif defined(__FreeBSD__)
++ogg_uint32_t oc_cpu_flags_get(void){
++  return 0;
++}
++
+ #else
+ /*The feature registers which can tell us what the processor supports are
+    accessible in priveleged modes only, so we can't have a general user-space
Property changes on: mail/thunderbird/files/patch-mozilla_media_libtheora_lib_arm_armcpu.c
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: mail/thunderbird/files/patch-mozilla_media_libtheora_moz.build
===================================================================
--- mail/thunderbird/files/patch-mozilla_media_libtheora_moz.build	(nonexistent)
+++ mail/thunderbird/files/patch-mozilla_media_libtheora_moz.build	(working copy)
@@ -0,0 +1,60 @@
+--- mozilla/media/libtheora/moz.build.orig	2017-04-11 04:13:12 UTC
++++ mozilla/media/libtheora/moz.build
+@@ -87,31 +87,34 @@ if CONFIG['GNU_AS']:
+             'lib/arm/armcpu.c',
+             'lib/arm/armstate.c',
+         ]
+-        for var in ('OC_ARM_ASM',
+-                    'OC_ARM_ASM_EDSP',
+-                    'OC_ARM_ASM_MEDIA',
+-                    'OC_ARM_ASM_NEON'):
+-            DEFINES[var] = True
+-        # The Android NDK doesn't pre-define anything to indicate the OS it's
+-        # on, so do it for them.
+-        if CONFIG['OS_TARGET'] == 'Android':
+-            DEFINES['__linux__'] = True
++        if CONFIG['BUILD_ARM_NEON']:
++            for var in ('OC_ARM_ASM',
++                        'OC_ARM_ASM_EDSP',
++                        'OC_ARM_ASM_MEDIA',
++                        'OC_ARM_ASM_NEON'):
++                DEFINES[var] = True
++            # The Android NDK doesn't pre-define anything to indicate the OS it's
++            # on, so do it for them.
++            if CONFIG['OS_TARGET'] == 'Android':
++                DEFINES['__linux__'] = True
++
++            SOURCES += [ '!%s.s' % f for f in [
++                'armbits-gnu',
++                'armfrag-gnu',
++                'armidct-gnu',
++                'armloop-gnu',
++            ]]
++
++            # These flags are a lie; they're just used to enable the requisite
++            # opcodes; actual arch detection is done at runtime.
++            ASFLAGS += [
++                '-march=armv7-a',
++            ]
++            ASFLAGS += CONFIG['NEON_FLAGS']
+ 
+-        SOURCES += [ '!%s.s' % f for f in [
+-            'armbits-gnu',
+-            'armfrag-gnu',
+-            'armidct-gnu',
+-            'armloop-gnu',
+-        ]]
+-
+-        # These flags are a lie; they're just used to enable the requisite
+-        # opcodes; actual arch detection is done at runtime.
+-        ASFLAGS += [
+-            '-march=armv7-a',
+-        ]
+-        ASFLAGS += CONFIG['NEON_FLAGS']
+-
+         if CONFIG['CLANG_CXX']:
+             ASFLAGS += [
+                 '-no-integrated-as',
++                '-Wa,-march=armv7-a',
++                '-Wa,-mfpu=neon',
+             ]
Property changes on: mail/thunderbird/files/patch-mozilla_media_libtheora_moz.build
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: mail/thunderbird/files/patch-mozilla_toolkit_components_protobuf_src_google_protobuf_stubs_atomicops__internals__arm__gcc.h
===================================================================
--- mail/thunderbird/files/patch-mozilla_toolkit_components_protobuf_src_google_protobuf_stubs_atomicops__internals__arm__gcc.h	(nonexistent)
+++ mail/thunderbird/files/patch-mozilla_toolkit_components_protobuf_src_google_protobuf_stubs_atomicops__internals__arm__gcc.h	(working copy)
@@ -0,0 +1,264 @@
+--- mozilla/toolkit/components/protobuf/src/google/protobuf/stubs/atomicops_internals_arm_gcc.h.orig	2017-10-31 10:44:14 UTC
++++ mozilla/toolkit/components/protobuf/src/google/protobuf/stubs/atomicops_internals_arm_gcc.h
+@@ -35,10 +35,17 @@
+ #ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
+ #define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
+ 
++#if defined(__FreeBSD__)
++#include <sys/types.h>
++#include <machine/atomic.h>
++#endif
++
+ namespace google {
+ namespace protobuf {
+ namespace internal {
+ 
++inline void MemoryBarrier() {
++#if defined(__linux__) || defined(__ANDROID__)
+ // 0xffff0fc0 is the hard coded address of a function provided by
+ // the kernel which implements an atomic compare-exchange. On older
+ // ARM architecture revisions (pre-v6) this may be implemented using
+@@ -53,29 +60,160 @@ LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute
+ typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+ LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+     (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
++#elif defined(__FreeBSD__)
++  dsb();
++#else
++#error MemoryBarrier() is not implemented on this platform.
++#endif
++}
+ 
++// An ARM toolchain would only define one of these depending on which
++// variant of the target architecture is being used. This tests against
++// any known ARMv6 or ARMv7 variant, where it is possible to directly
++// use ldrex/strex instructions to implement fast atomic operations.
++#if defined(__ARM_ARCH_8A__) || \
++    defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
++    defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
++    defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
++    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
++    defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || \
++    defined(__ARM_ARCH_6KZ__)
+ 
+ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                          Atomic32 old_value,
+                                          Atomic32 new_value) {
+-  Atomic32 prev_value = *ptr;
++  Atomic32 prev_value;
++  int reloop;
+   do {
+-    if (!pLinuxKernelCmpxchg(old_value, new_value,
+-                             const_cast<Atomic32*>(ptr))) {
+-      return old_value;
+-    }
+-    prev_value = *ptr;
+-  } while (prev_value == old_value);
++    // The following is equivalent to:
++    //
++    //   prev_value = LDREX(ptr)
++    //   reloop = 0
++    //   if (prev_value != old_value)
++    //     reloop = STREX(ptr, new_value)
++    __asm__ __volatile__("    ldrex %0, [%3]\n"
++                         "    mov %1, #0\n"
++                         "    cmp %0, %4\n"
++#ifdef __thumb2__
++                         "    it eq\n"
++#endif
++                         "    strexeq %1, %5, [%3]\n"
++                         : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
++                         : "r"(ptr), "r"(old_value), "r"(new_value)
++                         : "cc", "memory");
++  } while (reloop != 0);
+   return prev_value;
+ }
+ 
++inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  MemoryBarrier();
++  return result;
++}
++
++inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
++                                       Atomic32 old_value,
++                                       Atomic32 new_value) {
++  MemoryBarrier();
++  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++}
++
++inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
++                                          Atomic32 increment) {
++  Atomic32 value;
++  int reloop;
++  do {
++    // Equivalent to:
++    //
++    //   value = LDREX(ptr)
++    //   value += increment
++    //   reloop = STREX(ptr, value)
++    //
++    __asm__ __volatile__("    ldrex %0, [%3]\n"
++                         "    add %0, %0, %4\n"
++                         "    strex %1, %0, [%3]\n"
++                         : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
++                         : "r"(ptr), "r"(increment)
++                         : "cc", "memory");
++  } while (reloop);
++  return value;
++}
++
++inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
++                                        Atomic32 increment) {
++  // TODO(digit): Investigate if it's possible to implement this with
++  // a single MemoryBarrier() operation between the LDREX and STREX.
++  // See http://crbug.com/246514
++  MemoryBarrier();
++  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
++  MemoryBarrier();
++  return result;
++}
++
+ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                          Atomic32 new_value) {
+   Atomic32 old_value;
++  int reloop;
+   do {
++    // old_value = LDREX(ptr)
++    // reloop = STREX(ptr, new_value)
++    __asm__ __volatile__("    ldrex %0, [%3]\n"
++                         "    strex %1, %4, [%3]\n"
++                         : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
++                         : "r"(ptr), "r"(new_value)
++                         : "cc", "memory");
++  } while (reloop != 0);
++  return old_value;
++}
++
++// This tests against any known ARMv5 variant.
++#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
++    defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
++
++// The kernel also provides a helper function to perform an atomic
++// compare-and-swap operation at the hard-wired address 0xffff0fc0.
++// On ARMv5, this is implemented by a special code path that the kernel
++// detects and treats specially when thread pre-emption happens.
++// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
++//
++// Note that this always performs a full memory barrier, there is no
++// need to add calls to MemoryBarrier() before or after it. It also
++// returns 0 on success, and 1 on failure.
++//
++// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
++// use newer kernel revisions, so this should not be a concern.
++namespace {
++
++inline int LinuxKernelCmpxchg(Atomic32 old_value,
++                              Atomic32 new_value,
++                              volatile Atomic32* ptr) {
++  typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
++  return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
++}
++
++}  // namespace
++
++inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
++                                         Atomic32 old_value,
++                                         Atomic32 new_value) {
++  Atomic32 prev_value;
++  for (;;) {
++    prev_value = *ptr;
++    if (prev_value != old_value)
++      return prev_value;
++    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
++      return old_value;
++  }
++}
++
++inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
++                                         Atomic32 new_value) {
++  Atomic32 old_value;
++  do {
+     old_value = *ptr;
+-  } while (pLinuxKernelCmpxchg(old_value, new_value,
+-                               const_cast<Atomic32*>(ptr)));
++  } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
+   return old_value;
+ }
+ 
+@@ -90,8 +228,7 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomi
+     // Atomic exchange the old value with an incremented one.
+     Atomic32 old_value = *ptr;
+     Atomic32 new_value = old_value + increment;
+-    if (pLinuxKernelCmpxchg(old_value, new_value,
+-                            const_cast<Atomic32*>(ptr)) == 0) {
++    if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
+       // The exchange took place as expected.
+       return new_value;
+     }
+@@ -102,23 +239,46 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomi
+ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                        Atomic32 old_value,
+                                        Atomic32 new_value) {
+-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  Atomic32 prev_value;
++  for (;;) {
++    prev_value = *ptr;
++    if (prev_value != old_value) {
++      // Always ensure acquire semantics.
++      MemoryBarrier();
++      return prev_value;
++    }
++    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
++      return old_value;
++  }
+ }
+ 
+ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                        Atomic32 old_value,
+                                        Atomic32 new_value) {
+-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
++  // This could be implemented as:
++  //    MemoryBarrier();
++  //    return NoBarrier_CompareAndSwap();
++  //
++  // But would use 3 barriers per successful CAS. To save performance,
++  // use Acquire_CompareAndSwap(). Its implementation guarantees that:
++  // - A successful swap uses only 2 barriers (in the kernel helper).
++  // - An early return due to (prev_value != old_value) performs
++  //   a memory barrier with no store, which is equivalent to the
++  //   generic implementation above.
++  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+ }
+ 
++#else
++#  error "Your CPU's ARM architecture is not supported yet"
++#endif
++
++// NOTE: Atomicity of the following load and store operations is only
++// guaranteed in case of 32-bit alignment of |ptr| values.
++
+ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+   *ptr = value;
+ }
+ 
+-inline void MemoryBarrier() {
+-  pLinuxKernelMemoryBarrier();
+-}
+-
+ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+   *ptr = value;
+   MemoryBarrier();
+@@ -129,9 +289,7 @@ inline void Release_Store(volatile Atomic32* ptr, Atom
+   *ptr = value;
+ }
+ 
+-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+-  return *ptr;
+-}
++inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+ 
+ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+   Atomic32 value = *ptr;
Property changes on: mail/thunderbird/files/patch-mozilla_toolkit_components_protobuf_src_google_protobuf_stubs_atomicops__internals__arm__gcc.h
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: mail/thunderbird/files/patch-mozilla_xpcom_reflect_xptcall_md_unix_moz.build
===================================================================
--- mail/thunderbird/files/patch-mozilla_xpcom_reflect_xptcall_md_unix_moz.build	(nonexistent)
+++ mail/thunderbird/files/patch-mozilla_xpcom_reflect_xptcall_md_unix_moz.build	(working copy)
@@ -0,0 +1,11 @@
+--- mozilla/xpcom/reflect/xptcall/md/unix/moz.build.orig	2017-10-04 11:41:06 UTC
++++ mozilla/xpcom/reflect/xptcall/md/unix/moz.build
+@@ -90,7 +90,7 @@ if CONFIG['OS_TEST'] == 'alpha':
+     ]
+ 
+ if CONFIG['CPU_ARCH'] == 'arm' or CONFIG['OS_TEST'] == 'sa110':
+-    if CONFIG['OS_ARCH'] == 'Linux':
++    if CONFIG['OS_ARCH'] in ('Linux', 'FreeBSD'):
+         SOURCES += [
+             'xptcinvoke_arm.cpp',
+             'xptcstubs_arm.cpp'
Property changes on: mail/thunderbird/files/patch-mozilla_xpcom_reflect_xptcall_md_unix_moz.build
___________________________________________________________________
Added: fbsd:nokeywords
## -0,0 +1 ##
+yes
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
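
Reviewer note (not part of the patch): the core of the two atomics patches above is the ARMv6/ARMv7 LDREX/STREX compare-and-swap loop. The standalone C sketch below shows the same loop outside the Mozilla tree, as a minimal way to exercise the inline assembly on a FreeBSD/arm box before a full Thunderbird build. The file and function names are illustrative only; it builds only for 32-bit ARM, e.g. "cc -march=armv7-a -O2 cas_demo.c".

#include <stdint.h>
#include <stdio.h>

/*
 * Atomically set *ptr to new_value if it still holds old_value.
 * Returns the value observed before the attempt, matching the
 * semantics of the NoBarrier_CompareAndSwap() added above.
 */
static int32_t
cas32(volatile int32_t *ptr, int32_t old_value, int32_t new_value)
{
	int32_t prev;
	int reloop;

	do {
		__asm__ __volatile__(
		    "    ldrex   %0, [%3]\n"     /* prev = *ptr (exclusive load) */
		    "    mov     %1, #0\n"       /* assume no retry is needed */
		    "    cmp     %0, %4\n"
#ifdef __thumb2__
		    "    it eq\n"
#endif
		    "    strexeq %1, %5, [%3]\n" /* try store; %1 = 1 on failure */
		    : "=&r"(prev), "=&r"(reloop), "+m"(*ptr)
		    : "r"(ptr), "r"(old_value), "r"(new_value)
		    : "cc", "memory");
	} while (reloop != 0);               /* lost the reservation: retry */
	return (prev);
}

int
main(void)
{
	volatile int32_t v = 1;

	printf("%d\n", (int)cas32(&v, 1, 2)); /* prints 1; v is now 2 */
	printf("%d\n", (int)cas32(&v, 5, 9)); /* prints 2; v is unchanged */
	return (0);
}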