summary refs log tree commit diff
diff options
context:
space:
mode:
authorXavier Leroy <xavier.leroy@inria.fr>2009-05-04 13:46:46 +0000
committerXavier Leroy <xavier.leroy@inria.fr>2009-05-04 13:46:46 +0000
commit4e50c497a873e7db9a7aa1c0a04ef39fb04a6cda (patch)
tree5f76f817b7bbc23bfa12403d2ff9537d760378ed
parent5ba174438fe1d6038f483676aba2ad65eb7b709b (diff)
downloadocaml-4e50c497a873e7db9a7aa1c0a04ef39fb04a6cda.tar.gz
Updating the ARM port, continued:
- Reserve register r9 and treat r10 as callee-save, as per the EABI. - Treatment of alloc_limit register. - Fixed bug in inlined allocation sequence. git-svn-id: http://caml.inria.fr/svn/ocaml/trunk@9252 f963ae5c-01c2-4b8c-9fe0-0dff7051ff02
-rw-r--r--asmcomp/arm/emit.mlp76
-rw-r--r--asmcomp/arm/proc.ml31
-rw-r--r--asmrun/arm.S201
-rw-r--r--asmrun/signals_osdep.h19
4 files changed, 163 insertions, 164 deletions
diff --git a/asmcomp/arm/emit.mlp b/asmcomp/arm/emit.mlp
index 1e0c915261..5d9e5cf7cc 100644
--- a/asmcomp/arm/emit.mlp
+++ b/asmcomp/arm/emit.mlp
@@ -184,16 +184,23 @@ let decompose_intconst n fn =
done;
!ninstr
-(* Emit a non-immediate integer constant *)
-
-let emit_complex_intconst r n =
- let first = ref true in
- decompose_intconst n
- (fun bits ->
- if !first
- then ` mov {emit_reg r}, #{emit_nativeint bits} @ {emit_nativeint n}\n`
- else ` add {emit_reg r}, {emit_reg r}, #{emit_nativeint bits}\n`;
- first := false)
+(* Load an integer constant into a register *)
+
+let emit_intconst r n =
+ let nr = Nativeint.lognot n in
+ if is_immediate n then begin
+ ` mov {emit_reg r}, #{emit_nativeint n}\n`; 1
+ end else if is_immediate nr then begin
+ ` mvn {emit_reg r}, #{emit_nativeint nr}\n`; 1
+ end else begin
+ let first = ref true in
+ decompose_intconst n
+ (fun bits ->
+ if !first
+ then ` mov {emit_reg r}, #{emit_nativeint bits} @ {emit_nativeint n}\n`
+ else ` add {emit_reg r}, {emit_reg r}, #{emit_nativeint bits}\n`;
+ first := false)
+ end
(* Adjust sp (up or down) by the given byte amount *)
@@ -203,13 +210,6 @@ let emit_stack_adjustment instr n =
(fun bits ->
` {emit_string instr} sp, sp, #{emit_nativeint bits}\n`)
-(* Adjust alloc_ptr down by the given byte amount *)
-
-let emit_alloc_decrement n =
- decompose_intconst (Nativeint.of_int n)
- (fun bits ->
- ` sub alloc_ptr, alloc_ptr, #{emit_nativeint bits}\n`)
-
(* Name of current function *)
let function_name = ref ""
(* Entry point for tail recursive calls *)
@@ -265,14 +265,7 @@ let emit_instr i =
assert false
end
| Lop(Iconst_int n) ->
- let r = i.res.(0) in
- let nr = Nativeint.lognot n in
- if is_immediate n then begin
- ` mov {emit_reg r}, #{emit_nativeint n}\n`; 1
- end else if is_immediate nr then begin
- ` mvn {emit_reg r}, #{emit_nativeint nr}\n`; 1
- end else
- emit_complex_intconst r n
+ emit_intconst i.res.(0) n
| Lop(Iconst_float s) ->
let bits = Int64.bits_of_float (float_of_string s) in
let high_bits = Int64.to_nativeint (Int64.shift_right_logical bits 32)
@@ -292,7 +285,7 @@ let emit_instr i =
` ldr {emit_reg i.res.(0)}, {emit_label lbl} @ {emit_symbol s}\n`; 1
| Lop(Icall_ind) ->
` mov lr, pc\n`;
- `{record_frame i.live} mov pc, {emit_reg i.arg.(0)}\n`; 2
+ `{record_frame i.live} bx {emit_reg i.arg.(0)}\n`; 2
| Lop(Icall_imm s) ->
`{record_frame i.live} bl {emit_symbol s}\n`; 1
| Lop(Itailcall_ind) ->
@@ -300,7 +293,7 @@ let emit_instr i =
if !contains_calls then
` ldr lr, [sp, #{emit_int (n-4)}]\n`;
let ninstr = emit_stack_adjustment "add" n in
- ` mov pc, {emit_reg i.arg.(0)}\n`;
+ ` bx {emit_reg i.arg.(0)}\n`;
2 + ninstr
| Lop(Itailcall_imm s) ->
if s = !function_name then begin
@@ -316,7 +309,7 @@ let emit_instr i =
| Lop(Iextcall(s, alloc)) ->
if alloc then begin
let lbl = label_constant symbol_constants s 1 in
- ` ldr r10, {emit_label lbl} @ {emit_symbol s}\n`;
+ ` ldr r12, {emit_label lbl} @ {emit_symbol s}\n`;
`{record_frame i.live} bl caml_c_call\n`; 2
end else begin
` bl {emit_symbol s}\n`; 1
@@ -366,25 +359,20 @@ let emit_instr i =
1
| Lop(Ialloc n) ->
if !fastcode_flag then begin
- ` ldr r10, [alloc_limit, #0]\n`;
- let ni = emit_alloc_decrement n in
- ` cmp alloc_ptr, r10\n`;
+ let ni = emit_intconst (phys_reg 8 (*r12*)) (Nativeint.of_int n) in
+ ` sub alloc_ptr, alloc_ptr, r12\n`;
+ ` cmp alloc_ptr, alloc_limit\n`;
`{record_frame i.live} blcc caml_call_gc\n`;
` add {emit_reg i.res.(0)}, alloc_ptr, #4\n`;
4 + ni
end else if n = 8 || n = 12 || n = 16 then begin
- `{record_frame i.live} bl caml_alloc{emit_int ((n-4)/4)}\n`;
- ` add {emit_reg i.res.(0)}, alloc_ptr, #4\n`; 2
+ `{record_frame i.live} bl caml_alloc{emit_int ((n-4)/4)}\n`;
+ ` add {emit_reg i.res.(0)}, alloc_ptr, #4\n`; 2
end else begin
- let nn = Nativeint.of_int n in
- let ni =
- if is_immediate nn then begin
- ` mov r10, #{emit_int n}\n`; 1
- end else
- emit_complex_intconst (phys_reg 8 (*r10*)) nn in
- `{record_frame i.live} bl caml_allocN\n`;
- ` add {emit_reg i.res.(0)}, alloc_ptr, #4\n`;
- 2 + ni
+ let ni = emit_intconst (phys_reg 8 (*r12*)) (Nativeint.of_int n) in
+ `{record_frame i.live} bl caml_allocN\n`;
+ ` add {emit_reg i.res.(0)}, alloc_ptr, #4\n`;
+ 2 + ni
end
| Lop(Iintop(Ilsl | Ilsr | Iasr as op)) ->
let shift = name_for_shift_operation op in
@@ -460,7 +448,7 @@ let emit_instr i =
` ldr lr, [sp, #{emit_int(n-4)}]\n`; 1
| Lreturn ->
let ninstr = emit_stack_adjustment "add" (frame_size()) in
- ` mov pc, lr\n`;
+ ` bx lr\n`;
ninstr + 1
| Llabel lbl ->
`{emit_label lbl}:\n`; 0
@@ -619,7 +607,7 @@ let data l =
let begin_assembly() =
`trap_ptr .req r11\n`;
`alloc_ptr .req r8\n`;
- `alloc_limit .req r9\n`;
+ `alloc_limit .req r10\n`;
let lbl_begin = Compilenv.make_symbol (Some "data_begin") in
` .data\n`;
` .global {emit_symbol lbl_begin}\n`;
diff --git a/asmcomp/arm/proc.ml b/asmcomp/arm/proc.ml
index 0916a83239..06b085b4df 100644
--- a/asmcomp/arm/proc.ml
+++ b/asmcomp/arm/proc.ml
@@ -27,26 +27,27 @@ let word_addressed = false
(* Registers available for register allocation *)
(* Register map:
- r0 - r7 general purpose (r4 - r7 preserved by C)
- r8 allocation pointer (preserved by C)
- r9 allocation limit (preserved by C)
- r10 general purpose
- r11 trap pointer (preserved by C)
- r12 general purpose
+ r0 - r3 general purpose (not preserved by C)
+ r4 - r7 general purpose (preserved)
+ r8 allocation pointer (preserved)
+ r9 platform register, usually reserved
+ r10 allocation limit (preserved)
+ r11 trap pointer (preserved)
+ r12 general purpose (not preserved by C)
r13 stack pointer
r14 return address
r15 program counter
*)
let int_reg_name = [|
- "r0"; "r1"; "r2"; "r3"; "r4"; "r5"; "r6"; "r7"; "r10"; "r12"
+ "r0"; "r1"; "r2"; "r3"; "r4"; "r5"; "r6"; "r7"; "r12"
|]
let num_register_classes = 1
let register_class r = assert (r.typ <> Float); 0
-let num_available_registers = [| 10 |]
+let num_available_registers = [| 9 |]
let first_available_register = [| 0 |]
@@ -57,8 +58,8 @@ let rotate_registers = true
(* Representation of hard registers by pseudo-registers *)
let hard_int_reg =
- let v = Array.create 10 Reg.dummy in
- for i = 0 to 9 do v.(i) <- Reg.at_location Int (Reg i) done;
+ let v = Array.create 9 Reg.dummy in
+ for i = 0 to 8 do v.(i) <- Reg.at_location Int (Reg i) done;
v
let all_phys_regs = hard_int_reg
@@ -113,13 +114,13 @@ let loc_exn_bucket = phys_reg 0
(* Registers destroyed by operations *)
-let destroyed_at_c_call = (* r4-r9 preserved *)
- Array.of_list(List.map phys_reg [0;1;2;3;8;9])
+let destroyed_at_c_call = (* r4-r7 preserved *)
+ Array.of_list(List.map phys_reg [0;1;2;3;8])
let destroyed_at_oper = function
Iop(Icall_ind | Icall_imm _ | Iextcall(_, true)) -> all_phys_regs
| Iop(Iextcall(_, false)) -> destroyed_at_c_call
- | Iop(Ialloc(_)) -> [|phys_reg 8|] (* r10 destroyed *)
+ | Iop(Ialloc(_)) -> [|phys_reg 8|] (* r12 destroyed *)
| _ -> [||]
let destroyed_at_raise = all_phys_regs
@@ -128,10 +129,10 @@ let destroyed_at_raise = all_phys_regs
let safe_register_pressure = function
Iextcall(_, _) -> 4
- | _ -> 10
+ | _ -> 9
let max_register_pressure = function
Iextcall(_, _) -> [| 4 |]
- | _ -> [| 10 |]
+ | _ -> [| 9 |]
(* Layout of the stack *)
diff --git a/asmrun/arm.S b/asmrun/arm.S
index 674c99de67..8a47d182c9 100644
--- a/asmrun/arm.S
+++ b/asmrun/arm.S
@@ -17,126 +17,123 @@
trap_ptr .req r11
alloc_ptr .req r8
-alloc_limit .req r9
+alloc_limit .req r10
.text
/* Allocation functions and GC interface */
- .global caml_call_gc
+ .globl caml_call_gc
caml_call_gc:
- /* Record return address */
- /* We can use r10 as a temp reg since it's not live here */
- ldr r10, .Lcaml_last_return_address
- str lr, [r10, #0]
+ /* Record return address and desired size */
+ /* Can use alloc_limit as a temporary since it will be reloaded by
+ invoke_gc */
+ ldr alloc_limit, .Lcaml_last_return_address
+ str lr, [alloc_limit, #0]
+ ldr alloc_limit, .Lcaml_requested_size
+ str r12, [alloc_limit, #0]
/* Branch to shared GC code */
bl .Linvoke_gc
- /* Restart allocation sequence (4 instructions before) */
- sub lr, lr, #16
- mov pc, lr
+ /* Finish allocation */
+ ldr r12, .Lcaml_requested_size
+ ldr r12, [r12, #0]
+ sub alloc_ptr, alloc_ptr, r12
+ bx lr
- .global caml_alloc1
+ .globl caml_alloc1
caml_alloc1:
- ldr r10, [alloc_limit, #0]
sub alloc_ptr, alloc_ptr, #8
- cmp alloc_ptr, r10
+ cmp alloc_ptr, alloc_limit
movcs pc, lr /* Return if alloc_ptr >= alloc_limit */
/* Record return address */
- ldr r10, .Lcaml_last_return_address
- str lr, [r10, #0]
+ ldr r12, .Lcaml_last_return_address
+ str lr, [r12, #0]
/* Invoke GC */
bl .Linvoke_gc
/* Try again */
b caml_alloc1
- .global caml_alloc2
+ .globl caml_alloc2
caml_alloc2:
- ldr r10, [alloc_limit, #0]
sub alloc_ptr, alloc_ptr, #12
- cmp alloc_ptr, r10
+ cmp alloc_ptr, alloc_limit
movcs pc, lr /* Return if alloc_ptr >= alloc_limit */
/* Record return address */
- ldr r10, .Lcaml_last_return_address
- str lr, [r10, #0]
+ ldr r12, .Lcaml_last_return_address
+ str lr, [r12, #0]
/* Invoke GC */
bl .Linvoke_gc
/* Try again */
b caml_alloc2
- .global caml_alloc3
+ .globl caml_alloc3
caml_alloc3:
- ldr r10, [alloc_limit, #0]
sub alloc_ptr, alloc_ptr, #16
- cmp alloc_ptr, r10
+ cmp alloc_ptr, alloc_limit
movcs pc, lr /* Return if alloc_ptr >= alloc_limit */
/* Record return address */
- ldr r10, .Lcaml_last_return_address
- str lr, [r10, #0]
+ ldr r12, .Lcaml_last_return_address
+ str lr, [r12, #0]
/* Invoke GC */
bl .Linvoke_gc
/* Try again */
b caml_alloc3
- .global caml_allocN
+ .globl caml_allocN
caml_allocN:
- str r12, [sp, #-4]!
- ldr r12, [alloc_limit, #0]
- sub alloc_ptr, alloc_ptr, r10
- cmp alloc_ptr, r12
- ldr r12, [sp], #4
+ sub alloc_ptr, alloc_ptr, r12
+ cmp alloc_ptr, alloc_limit
movcs pc, lr /* Return if alloc_ptr >= alloc_limit */
/* Record return address and desired size */
+ /* Can use alloc_limit as a temporary since it will be reloaded by
+ invoke_gc */
ldr alloc_limit, .Lcaml_last_return_address
str lr, [alloc_limit, #0]
- ldr alloc_limit, .LLcaml_requested_size
- str r10, [alloc_limit, #0]
+ ldr alloc_limit, .Lcaml_requested_size
+ str r12, [alloc_limit, #0]
/* Invoke GC */
bl .Linvoke_gc
/* Try again */
- ldr r10, .LLcaml_requested_size
- ldr r10, [r10, #0]
+ ldr r12, .Lcaml_requested_size
+ ldr r12, [r12, #0]
b caml_allocN
/* Shared code to invoke the GC */
.Linvoke_gc:
/* Record lowest stack address */
- ldr r10, .Lcaml_bottom_of_stack
- str sp, [r10, #0]
+ ldr r12, .Lcaml_bottom_of_stack
+ str sp, [r12, #0]
/* Save integer registers and return address on stack */
- sub sp, sp, #4 /* preserve 8-alignment */
- stmfd sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r10,r12,lr}
+ stmfd sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r12,lr}
/* Store pointer to saved integer registers in caml_gc_regs */
- ldr r10, .Lcaml_gc_regs
- str sp, [r10, #0]
+ ldr r12, .Lcaml_gc_regs
+ str sp, [r12, #0]
/* Save current allocation pointer for debugging purposes */
- ldr r10, .Lcaml_young_ptr
- str alloc_ptr, [r10, #0]
+ ldr r12, .Lcaml_young_ptr
+ str alloc_ptr, [r12, #0]
/* Save trap pointer in case an exception is raised during GC */
- ldr r10, .Lcaml_exception_pointer
- str trap_ptr, [r10, #0]
+ ldr r12, .Lcaml_exception_pointer
+ str trap_ptr, [r12, #0]
/* Call the garbage collector */
bl caml_garbage_collection
/* Restore the registers from the stack */
- ldmfd sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r10,r12}
+ ldmfd sp!, {r0,r1,r2,r3,r4,r5,r6,r7,r12}
/* Reload return address */
- ldr r10, .Lcaml_last_return_address
- ldr lr, [r10, #0]
- /* Say that we are back into Caml code */
- mov alloc_ptr, #0
- str alloc_ptr, [r10, #0]
+ ldr r12, .Lcaml_last_return_address
+ ldr lr, [r12, #0]
/* Reload new allocation pointer and allocation limit */
- ldr r10, .Lcaml_young_ptr
- ldr alloc_ptr, [r10, #0]
- ldr alloc_limit, .Lcaml_young_limit
+ ldr r12, .Lcaml_young_ptr
+ ldr alloc_ptr, [r12, #0]
+ ldr r12, .Lcaml_young_limit
+ ldr alloc_limit, [r12, #0]
/* Return to caller */
- ldr r10, [sp, #0]
- add sp, sp, #8
- mov pc, r10
+ ldr r12, [sp], #4
+ bx r12
/* Call a C function from Caml */
-/* Function to call is in r10 */
+/* Function to call is in r12 */
- .global caml_c_call
+ .globl caml_c_call
caml_c_call:
/* Preserve return address in callee-save register r4 */
mov r4, lr
@@ -152,28 +149,27 @@ caml_c_call:
str trap_ptr, [r7, #0]
/* Call the function */
mov lr, pc
- mov pc, r10
- /* Reload alloc ptr */
+ bx r12
+ /* Reload alloc ptr and alloc limit */
+ ldr r5, .Lcaml_young_limit
ldr alloc_ptr, [r6, #0] /* r6 still points to caml_young_ptr */
- /* Say that we are back into Caml code */
- mov r6, #0
- str r6, [r5, #0] /* r5 still points to caml_last_return_address */
+ ldr alloc_limit, [r5, #0]
/* Return */
- mov pc, r4
+ bx r4
/* Start the Caml program */
- .global caml_start_program
+ .globl caml_start_program
caml_start_program:
- ldr r10, .Lcaml_program
+ ldr r12, .Lcaml_program
/* Code shared with caml_callback* */
-/* Address of Caml code to call is in r10 */
+/* Address of Caml code to call is in r12 */
/* Arguments to the Caml code are in r0...r3 */
.Ljump_to_caml:
/* Save return address and callee-save registers */
- stmfd sp!, {r4,r5,r6,r7,r8,r9,r11,lr} /* 8-alignment */
+ stmfd sp!, {r4,r5,r6,r7,r8,r10,r11,lr} /* 8-alignment */
/* Setup a callback link on the stack */
sub sp, sp, #4*4 /* 8-alignment */
ldr r4, .Lcaml_bottom_of_stack
@@ -196,14 +192,11 @@ caml_start_program:
/* Reload allocation pointers */
ldr r4, .Lcaml_young_ptr
ldr alloc_ptr, [r4, #0]
- ldr alloc_limit, .Lcaml_young_limit
- /* We are back into Caml code */
- ldr r4, .Lcaml_last_return_address
- mov r5, #0
- str r5, [r4, #0]
+ ldr r4, .Lcaml_young_limit
+ ldr alloc_limit, [r4, #0]
/* Call the Caml code */
mov lr, pc
- mov pc, r10
+ bx r12
.Lcaml_retaddr:
/* Pop the trap frame, restoring caml_exception_pointer */
ldr r4, .Lcaml_exception_pointer
@@ -226,7 +219,8 @@ caml_start_program:
ldr r4, .Lcaml_young_ptr
str alloc_ptr, [r4, #0]
/* Reload callee-save registers and return */
- ldmfd sp!, {r4,r5,r6,r7,r8,r9,r11,pc}
+ ldmfd sp!, {r4,r5,r6,r7,r8,r10,r11,lr}
+ bx lr
/* The trap handler */
.Ltrap_handler:
@@ -240,59 +234,56 @@ caml_start_program:
/* Raise an exception from C */
- .global caml_raise_exception
+ .globl caml_raise_exception
caml_raise_exception:
/* Reload Caml allocation pointers */
- ldr r1, .Lcaml_young_ptr
- ldr alloc_ptr, [r1, #0]
- ldr alloc_limit, .Lcaml_young_limit
- /* Say we're back into Caml */
- ldr r1, .Lcaml_last_return_address
- mov r2, #0
- str r2, [r1, #0]
+ ldr r12, .Lcaml_young_ptr
+ ldr alloc_ptr, [r12, #0]
+ ldr r12, .Lcaml_young_limit
+ ldr alloc_limit, [r12, #0]
/* Cut stack at current trap handler */
- ldr r1, .Lcaml_exception_pointer
- ldr sp, [r1, #0]
+ ldr r12, .Lcaml_exception_pointer
+ ldr sp, [r12, #0]
/* Pop previous handler and addr of trap, and jump to it */
ldmfd sp!, {trap_ptr, pc}
/* Callback from C to Caml */
- .global caml_callback_exn
+ .globl caml_callback_exn
caml_callback_exn:
/* Initial shuffling of arguments (r0 = closure, r1 = first arg) */
- mov r10, r0
+ mov r12, r0
mov r0, r1 /* r0 = first arg */
- mov r1, r10 /* r1 = closure environment */
- ldr r10, [r10, #0] /* code pointer */
+ mov r1, r12 /* r1 = closure environment */
+ ldr r12, [r12, #0] /* code pointer */
b .Ljump_to_caml
- .global caml_callback2_exn
+ .globl caml_callback2_exn
caml_callback2_exn:
/* Initial shuffling of arguments (r0 = closure, r1 = arg1, r2 = arg2) */
- mov r10, r0
+ mov r12, r0
mov r0, r1 /* r0 = first arg */
mov r1, r2 /* r1 = second arg */
- mov r2, r10 /* r2 = closure environment */
- ldr r10, .Lcaml_apply2
+ mov r2, r12 /* r2 = closure environment */
+ ldr r12, .Lcaml_apply2
b .Ljump_to_caml
- .global caml_callback3_exn
+ .globl caml_callback3_exn
caml_callback3_exn:
/* Initial shuffling of arguments */
/* (r0 = closure, r1 = arg1, r2 = arg2, r3 = arg3) */
- mov r10, r0
+ mov r12, r0
mov r0, r1 /* r0 = first arg */
mov r1, r2 /* r1 = second arg */
mov r2, r3 /* r2 = third arg */
- mov r3, r10 /* r3 = closure environment */
- ldr r10, .Lcaml_apply3
+ mov r3, r12 /* r3 = closure environment */
+ ldr r12, .Lcaml_apply3
b .Ljump_to_caml
- .global caml_ml_array_bound_error
+ .globl caml_ml_array_bound_error
caml_ml_array_bound_error:
- /* Load address of [caml_array_bound_error] in r10 */
- ldr r10, .Lcaml_array_bound_error
+ /* Load address of [caml_array_bound_error] in r12 */
+ ldr r12, .Lcaml_array_bound_error
/* Call that function */
b caml_c_call
@@ -308,17 +299,17 @@ caml_ml_array_bound_error:
.LLtrap_handler: .word .Ltrap_handler
.Lcaml_apply2: .word caml_apply2
.Lcaml_apply3: .word caml_apply3
-.LLcaml_requested_size: .word .Lcaml_requested_size
.Lcaml_array_bound_error: .word caml_array_bound_error
+.Lcaml_requested_size: .word caml_requested_size
-.data
-.Lcaml_requested_size: .word 0
+ .data
+caml_requested_size:
+ .word 0
/* GC roots for callback */
.data
-
- .global caml_system__frametable
+ .globl caml_system__frametable
caml_system__frametable:
.word 1 /* one descriptor */
.word .Lcaml_retaddr /* return address into callback */
diff --git a/asmrun/signals_osdep.h b/asmrun/signals_osdep.h
index 3a4a8fc670..65b5e17bc6 100644
--- a/asmrun/signals_osdep.h
+++ b/asmrun/signals_osdep.h
@@ -76,6 +76,25 @@
#define CONTEXT_SP (CONTEXT_STATE.CONTEXT_REG(rsp))
#define CONTEXT_FAULTING_ADDRESS ((char *) info->si_addr)
+/****************** ARM, Linux */
+
+#elif defined(TARGET_arm) && defined (SYS_linux)
+
+ #include <sys/ucontext.h>
+
+ #define DECLARE_SIGNAL_HANDLER(name) \
+ static void name(int sig, siginfo_t * info, ucontext_t * context)
+
+ #define SET_SIGACT(sigact,name) \
+ sigact.sa_sigaction = (void (*)(int,siginfo_t *,void *)) (name); \
+ sigact.sa_flags = SA_SIGINFO
+
+ typedef unsigned long context_reg;
+ #define CONTEXT_PC (context->uc_mcontext.arm_pc)
+ #define CONTEXT_EXCEPTION_POINTER (context->uc_mcontext.arm_fp)
+ #define CONTEXT_YOUNG_PTR (context->uc_mcontext.arm_r8)
+ #define CONTEXT_FAULTING_ADDRESS ((char *) context->uc_mcontext.fault_address)
+
/****************** I386, Linux */
#elif defined(TARGET_i386) && defined(SYS_linux_elf)