summaryrefslogtreecommitdiff
path: root/libguile/jit.c
diff options
context:
space:
mode:
authorAndy Wingo <wingo@pobox.com>2020-04-29 21:23:53 +0200
committerAndy Wingo <wingo@pobox.com>2020-04-29 21:47:37 +0200
commitd6b6daca372e3a7d2abc601e2b60d6c2cc6c0abc (patch)
treea9c7ad961f0f8879aa7217961b9ad1c5f408b426 /libguile/jit.c
parent3d96c87cf82e3f2f4d73195cda6753ebe5e6ad74 (diff)
downloadguile-d6b6daca372e3a7d2abc601e2b60d6c2cc6c0abc.tar.gz
Add intrinsics for a baseline compiler
Since there's no optimization in the baseline compiler, there's no sense in instruction explosion. * libguile/intrinsics.h: * libguile/intrinsics.c ($car, $cdr, $set-car!, $set-cdr!, $variable-ref, $variable-set!, $vector-length, $vector-ref, $vector-set!, $vector-ref/immediate, $vector-set!, $allocate-struct, $struct-vtable, $struct-ref, $struct-set! $struct-ref/immediate, $struct-set!): New intrinsics. * libguile/jit.c (compile_call_scm_scm, compile_call_scm_scm_slow) (compile_call_scm_scm_scm, compile_call_scm_scm_scm_slow) (compile_call_scm_uimm_scm, compile_call_scm_uimm_scm_slow): New code generators. * libguile/vm-engine.c (call-scm-scm, call-scm-scm-scm, call-scm-uimm-scm): New instructions. * module/system/vm/assembler.scm (emit-null?, emit-false?, emit-nil?): Export these. Also export emitters for the new intrinsics. (define-scm-scm-intrinsic, define-scm-uimm-scm-intrinsic) (define-scm-scm-scm-intrinsic): New helpers. * doc/ref/vm.texi (Intrinsic Call Instructions): Add new instructions.
Diffstat (limited to 'libguile/jit.c')
-rw-r--r--libguile/jit.c52
1 files changed, 52 insertions, 0 deletions
diff --git a/libguile/jit.c b/libguile/jit.c
index 7e5852c3e..ede16ea5e 100644
--- a/libguile/jit.c
+++ b/libguile/jit.c
@@ -3147,6 +3147,56 @@ compile_call_scm_from_thread_slow (scm_jit_state *j, uint32_t dst, uint32_t idx)
}
/* Code generator for the call-scm-scm instruction: call the intrinsic at
   table slot IDX with the SCM values in stack slots A and B.  */
static void
+compile_call_scm_scm (scm_jit_state *j, uint16_t a, uint16_t b, uint32_t idx)
+{
+  /* Index scm_vm_intrinsics as a flat array of function pointers.  */
+  void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
+
+  /* Sync the VM IP first so the intrinsic observes the correct source
+     location if it allocates or throws.  */
+  emit_store_current_ip (j, T0);
+  emit_call_2 (j, intrinsic, sp_scm_operand (j, a), sp_scm_operand (j, b));
+  /* NOTE(review): SP is reloaded after the call — presumably because the
+     callee can move or grow the VM stack; confirm against the other
+     compile_call_* generators in this file.  */
+  emit_reload_sp (j);
+}
+/* Slow-path counterpart of compile_call_scm_scm.  Intentionally empty:
+   the fast path emits no out-of-line branches for this instruction.  */
+static void
+compile_call_scm_scm_slow (scm_jit_state *j, uint16_t a, uint16_t b,
+ uint32_t idx)
+{
+}
+
+/* Code generator for the call-scm-scm-scm instruction: call the intrinsic
+   at table slot IDX with the SCM values in stack slots A, B, and C.  */
+static void
+compile_call_scm_scm_scm (scm_jit_state *j, uint8_t a, uint8_t b, uint8_t c,
+ uint32_t idx)
+{
+  /* Index scm_vm_intrinsics as a flat array of function pointers.  */
+ void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
+
+  /* Sync the VM IP so the intrinsic sees the right source location if it
+     allocates or throws.  */
+ emit_store_current_ip (j, T0);
+ emit_call_3 (j, intrinsic, sp_scm_operand (j, a), sp_scm_operand (j, b),
+ sp_scm_operand (j, c));
+  /* NOTE(review): SP reload mirrors the other call generators — presumably
+     the call can relocate the VM stack; confirm.  */
+ emit_reload_sp (j);
+}
+/* Slow-path counterpart of compile_call_scm_scm_scm.  Intentionally empty:
+   the fast path emits no out-of-line branches for this instruction.  */
+static void
+compile_call_scm_scm_scm_slow (scm_jit_state *j, uint8_t a, uint8_t b,
+ uint8_t c, uint32_t idx)
+{
+}
+
+/* Code generator for the call-scm-uimm-scm instruction: like
+   compile_call_scm_scm_scm, but the middle operand B is an unsigned 8-bit
+   immediate taken from the instruction stream rather than a stack slot.  */
+static void
+compile_call_scm_uimm_scm (scm_jit_state *j, uint8_t a, uint8_t b, uint8_t c,
+ uint32_t idx)
+{
+  /* Index scm_vm_intrinsics as a flat array of function pointers.  */
+ void *intrinsic = ((void **) &scm_vm_intrinsics)[idx];
+
+  /* Sync the VM IP so the intrinsic sees the right source location if it
+     allocates or throws.  */
+ emit_store_current_ip (j, T0);
+  /* B is passed by value with a uint8 ABI, not read from the stack.  */
+ emit_call_3 (j, intrinsic, sp_scm_operand (j, a),
+ jit_operand_imm (JIT_OPERAND_ABI_UINT8, b),
+ sp_scm_operand (j, c));
+ emit_reload_sp (j);
+}
+/* Slow-path counterpart of compile_call_scm_uimm_scm.  Intentionally empty:
+   the fast path emits no out-of-line branches for this instruction.  */
+static void
+compile_call_scm_uimm_scm_slow (scm_jit_state *j, uint8_t a, uint8_t b,
+ uint8_t c, uint32_t idx)
+{
+}
+
+static void
compile_fadd (scm_jit_state *j, uint8_t dst, uint8_t a, uint8_t b)
{
emit_sp_ref_f64 (j, JIT_F0, a);
@@ -5262,6 +5312,8 @@ compile_s64_to_f64_slow (scm_jit_state *j, uint16_t dst, uint16_t src)
}
#define COMPILE_X8_S8_S8_S8__C32(j, comp) \
COMPILE_X8_S8_S8_C8__C32(j, comp)
+/* An s8/c8/s8 instruction word has the same byte layout as s8/s8/c8
+   (opcode byte plus three 8-bit fields and a 32-bit word), so the same
+   operand decoder serves both.  */
+#define COMPILE_X8_S8_C8_S8__C32(j, comp) \
+ COMPILE_X8_S8_S8_C8__C32(j, comp)
#define COMPILE_X32__LO32__L32(j, comp) \
{ \