View | Details | Raw Unified | Return to bug 201227 | Differences between
and this patch

Collapse All | Expand All

(-)Makefile (-2 / +7 lines)
Lines 20-32 Link Here
20
20
21
CONFLICTS=	node-0.[02-9][0-9]* node-devel-0.[0-9]* iojs-*
21
CONFLICTS=	node-0.[02-9][0-9]* node-devel-0.[0-9]* iojs-*
22
22
23
ONLY_FOR_ARCHS=	i386 amd64
23
ONLY_FOR_ARCHS=	i386 amd64 armv6
24
CONFIGURE_ARGS=	--prefix=${PREFIX_RELDEST} --without-npm --shared-zlib
24
CONFIGURE_ARGS=	--prefix=${PREFIX_RELDEST} --without-npm --shared-zlib
25
PREFIX_RELDEST=	${PREFIX:S,^${DESTDIR},,}
25
PREFIX_RELDEST=	${PREFIX:S,^${DESTDIR},,}
26
REINPLACE_ARGS=	-i ''
26
REINPLACE_ARGS=	-i ''
27
MAKE_ENV+=	CC.host=${CC} CXX.host=${CXX} LINK.host=${CXX} LINK.target=${CXX}
27
MAKE_ENV+=	CC.host=${CC} CXX.host=${CXX} LINK.host=${CXX} LINK.target=${CXX}
28
28
29
.include <bsd.port.pre.mk>
29
.include <bsd.port.pre.mk>
30
31
.if ${ARCH} == "armv6"
32
CONFIGURE_ARGS+=	--openssl-no-asm
33
.endif
34
30
.if ${COMPILER_TYPE} == clang
35
.if ${COMPILER_TYPE} == clang
31
MAKE_ENV+=	LINK=clang++
36
MAKE_ENV+=	LINK=clang++
32
CFLAGS+=	-Wno-unused-private-field
37
CFLAGS+=	-Wno-unused-private-field
(-)files/patch-deps_v8_src_arm_assembler-arm.cc (+47 lines)
Line 0 Link Here
1
--- deps/v8/src/arm/assembler-arm.cc.orig	2015-06-01 14:03:13 UTC
2
+++ deps/v8/src/arm/assembler-arm.cc
3
@@ -66,6 +66,35 @@ namespace internal {
4
 // name space and pid 0 is used to kill the group (see man 2 kill).
5
 static const pthread_t kNoThread = (pthread_t) 0;
6
 
7
+#ifdef __arm__
8
+
9
+bool OS::ArmCpuHasFeature(CpuFeature feature) {
10
+  return false;
11
+}
12
+
13
+CpuImplementer OS::GetCpuImplementer() {
14
+  static bool use_cached_value = false;
15
+  static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
16
+  if (use_cached_value) {
17
+    return cached_value;
18
+  }
19
+  cached_value = ARM_IMPLEMENTER;
20
+
21
+  use_cached_value = true;
22
+  return cached_value;
23
+}
24
+
25
+
26
+bool OS::ArmUsingHardFloat() {
27
+#if defined(__ARM_PCS_VFP)
28
+  return true;
29
+#else
30
+  return false;
31
+#endif
32
+}
33
+
34
+#endif  // def __arm__
35
+
36
 
37
 double ceiling(double x) {
38
     // Correct as on OS X
39
@@ -857,7 +886,7 @@ static bool fits_shifter(uint32_t imm32,
40
                          Instr* instr) {
41
   // imm32 must be unsigned.
42
   for (int rot = 0; rot < 16; rot++) {
43
-    uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
44
+    uint32_t imm8 = rot == 0 ? imm32 : ((imm32 << 2*rot) | (imm32 >> (32 - 2*rot)));
45
     if ((imm8 <= 0xff)) {
46
       *rotate_imm = rot;
47
       *immed_8 = imm8;
(-)files/patch-deps_v8_src_arm_cpu-arm.cc (+22 lines)
Line 0 Link Here
1
--- deps/v8/src/arm/cpu-arm.cc.orig	2015-03-02 10:36:52 UTC
2
+++ deps/v8/src/arm/cpu-arm.cc
3
@@ -64,7 +64,7 @@ void CPU::FlushICache(void* start, size_
4
   // None of this code ends up in the snapshot so there are no issues
5
   // around whether or not to generate the code when building snapshots.
6
   Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
7
-#else
8
+#elif defined(__linux__)
9
   // Ideally, we would call
10
   //   syscall(__ARM_NR_cacheflush, start,
11
   //           reinterpret_cast<intptr_t>(start) + size, 0);
12
@@ -103,6 +103,10 @@ void CPU::FlushICache(void* start, size_
13
         : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
14
         : "r3");
15
   #endif
16
+#elif defined(__FreeBSD__)
17
+  __clear_cache(start, reinterpret_cast<char*>(start) + size);
18
+#else
19
+#error "No cache flush implementation on this platform"
20
 #endif
21
 }
22
 
(-)files/patch-deps_v8_src_atomicops.h (+14 lines)
Line 0 Link Here
1
--- deps/v8/src/atomicops.h.orig	2015-06-01 13:24:35 UTC
2
+++ deps/v8/src/atomicops.h
3
@@ -162,8 +162,10 @@ Atomic64 Release_Load(volatile const Ato
4
 #elif defined(__GNUC__) && \
5
   (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
6
 #include "atomicops_internals_x86_gcc.h"
7
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
8
+#elif defined(__GNUC__) && defined(__linux__) && defined(V8_HOST_ARCH_ARM)
9
 #include "atomicops_internals_arm_gcc.h"
10
+#elif defined(__FreeBSD__) && defined(V8_HOST_ARCH_ARM)
11
+#include "atomicops_internals_generic_gcc.h"
12
 #elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
13
 #include "atomicops_internals_mips_gcc.h"
14
 #else
(-)files/patch-deps_v8_src_atomicops__internals__generic__gcc.h (+138 lines)
Line 0 Link Here
1
--- deps/v8/src/atomicops_internals_generic_gcc.h.orig	2015-06-01 14:05:59 UTC
2
+++ deps/v8/src/atomicops_internals_generic_gcc.h
3
@@ -0,0 +1,135 @@
4
+// Copyright 2013 Red Hat Inc.  All rights reserved.
5
+//
6
+// Redistribution and use in source and binary forms, with or without
7
+// modification, are permitted provided that the following conditions are
8
+// met:
9
+//
10
+//     * Redistributions of source code must retain the above copyright
11
+// notice, this list of conditions and the following disclaimer.
12
+//     * Redistributions in binary form must reproduce the above
13
+// copyright notice, this list of conditions and the following disclaimer
14
+// in the documentation and/or other materials provided with the
15
+// distribution.
16
+//     * Neither the name of Red Hat Inc. nor the names of its
17
+// contributors may be used to endorse or promote products derived from
18
+// this software without specific prior written permission.
19
+//
20
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+
32
+// This file is an internal atomic implementation, use atomicops.h instead.
33
+
34
+#ifndef V8_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
35
+#define V8_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
36
+
37
+namespace v8 {
38
+namespace internal {
39
+
40
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
41
+                                         Atomic32 old_value,
42
+                                         Atomic32 new_value) {
43
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
44
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
45
+  return old_value;
46
+}
47
+
48
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
49
+                                         Atomic32 new_value) {
50
+  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
51
+}
52
+
53
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
54
+                                          Atomic32 increment) {
55
+  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
56
+}
57
+
58
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
59
+                                        Atomic32 increment) {
60
+  return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
61
+}
62
+
63
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
64
+                                       Atomic32 old_value,
65
+                                       Atomic32 new_value) {
66
+  __atomic_compare_exchange(ptr, &old_value, &new_value, true,
67
+                            __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
68
+  return old_value;
69
+}
70
+
71
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
72
+                                       Atomic32 old_value,
73
+                                       Atomic32 new_value) {
74
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
75
+                              __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
76
+  return old_value;
77
+}
78
+
79
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
80
+  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
81
+}
82
+
83
+inline void MemoryBarrier() {
84
+  __sync_synchronize();
85
+}
86
+
87
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
88
+  __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
89
+}
90
+
91
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
92
+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
93
+}
94
+
95
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
96
+  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
97
+}
98
+
99
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
100
+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
101
+}
102
+
103
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
104
+  return __atomic_load_n(ptr, __ATOMIC_SEQ_CST);
105
+}
106
+
107
+#ifdef __LP64__
108
+
109
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
110
+  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
111
+}
112
+
113
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
114
+  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
115
+}
116
+
117
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
118
+                                       Atomic64 old_value,
119
+                                       Atomic64 new_value) {
120
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
121
+                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
122
+  return old_value;
123
+}
124
+
125
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
126
+                                         Atomic64 old_value,
127
+                                         Atomic64 new_value) {
128
+  __atomic_compare_exchange_n(ptr, &old_value, new_value, true,
129
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
130
+  return old_value;
131
+}
132
+
133
+#endif // defined(__LP64__)
134
+
135
+}  // namespace internal
136
+}  // v8
137
+
138
+#endif  // V8_ATOMICOPS_INTERNALS_GENERIC_GCC_H_
(-)files/patch-deps_v8_src_platform-freebsd.cc (+13 lines)
Lines 27-29 Link Here
27
               kMmapFd,
27
               kMmapFd,
28
               kMmapFdOffset) != MAP_FAILED;
28
               kMmapFdOffset) != MAP_FAILED;
29
 }
29
 }
30
@@ -690,9 +690,9 @@ static void ProfilerSignalHandler(int si
31
   sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
32
   sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
33
 #elif V8_HOST_ARCH_ARM
34
-  sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
35
-  sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
36
-  sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
37
+  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_PC]);
38
+  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_SP]);
39
+  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_FP]);
40
 #endif
41
   sampler->SampleStack(sample);
42
   sampler->Tick(sample);

Return to bug 201227