summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--bootstraptest/test_yjit.rb22
-rw-r--r--yjit/bindgen/src/main.rs1
-rw-r--r--yjit/src/codegen.rs79
-rw-r--r--yjit/src/cruby_bindings.inc.rs1
-rw-r--r--yjit/src/stats.rs2
5 files changed, 76 insertions, 29 deletions
diff --git a/bootstraptest/test_yjit.rb b/bootstraptest/test_yjit.rb
index 7c2a50c4e0..d7dd0cd460 100644
--- a/bootstraptest/test_yjit.rb
+++ b/bootstraptest/test_yjit.rb
@@ -3632,3 +3632,25 @@ assert_normal_exit %q{
entry
}
+
+# Test that a splat argument combined with a rest parameter
+# properly dups the array
+assert_equal "[]", %q{
+ def foo(*rest)
+ rest << 1
+ end
+
+ def test(splat)
+ foo(*splat)
+ end
+
+ EMPTY = []
+ custom = Object.new
+ def custom.to_a
+ EMPTY
+ end
+
+ test(custom)
+ test(custom)
+ EMPTY
+}
diff --git a/yjit/bindgen/src/main.rs b/yjit/bindgen/src/main.rs
index d0551aedb7..481c403714 100644
--- a/yjit/bindgen/src/main.rs
+++ b/yjit/bindgen/src/main.rs
@@ -135,6 +135,7 @@ fn main() {
.allowlist_function("rb_ary_store")
.allowlist_function("rb_ary_resurrect")
.allowlist_function("rb_ary_clear")
+ .allowlist_function("rb_ary_dup")
// From internal/array.h
.allowlist_function("rb_ec_ary_new_from_values")
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 191d1d5f56..869c93fdbb 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -5252,11 +5252,6 @@ fn gen_send_iseq(
return CantCompile;
}
- if iseq_has_rest && flags & VM_CALL_ARGS_SPLAT != 0 {
- gen_counter_incr!(asm, send_iseq_has_rest_and_splat);
- return CantCompile;
- }
-
if iseq_has_rest && flags & VM_CALL_OPT_SEND != 0 {
gen_counter_incr!(asm, send_iseq_has_rest_and_send);
return CantCompile;
@@ -5315,6 +5310,19 @@ fn gen_send_iseq(
let mut start_pc_offset = 0;
let required_num = unsafe { get_iseq_body_param_lead_num(iseq) };
+ // If we have a rest and a splat, we can take a shortcut if the
+ // number of non-splat arguments is equal to the number of required
+ // arguments.
+ // For example:
+ // def foo(a, b, *rest)
+ // foo(1, 2, *[3, 4])
+ // In this case, we can just dup the splat array as the rest array.
+ // No need to move things around between the array and stack.
+ if iseq_has_rest && flags & VM_CALL_ARGS_SPLAT != 0 && argc - 1 != required_num {
+ gen_counter_incr!(asm, send_iseq_has_rest_and_splat_not_equal);
+ return CantCompile;
+ }
+
// This struct represents the metadata about the caller-specified
// keyword arguments.
let kw_arg = unsafe { vm_ci_kwarg(ci) };
@@ -5542,7 +5550,7 @@ fn gen_send_iseq(
};
// push_splat_args does stack manipulation so we can no longer side exit
- if flags & VM_CALL_ARGS_SPLAT != 0 {
+ if flags & VM_CALL_ARGS_SPLAT != 0 && !iseq_has_rest {
// If block_arg0_splat, we still need side exits after this, but
// doing push_splat_args here disallows it. So bail out.
if block_arg0_splat {
@@ -5765,37 +5773,52 @@ fn gen_send_iseq(
argc = lead_num;
}
+
if iseq_has_rest {
- assert!(argc >= required_num);
// We are going to allocate so setting pc and sp.
jit_save_pc(jit, asm);
gen_save_sp(asm, ctx);
- let n = (argc - required_num) as u32;
- argc = required_num + 1;
- // If n is 0, then elts is never going to be read, so we can just pass null
- let values_ptr = if n == 0 {
- Opnd::UImm(0)
+ if flags & VM_CALL_ARGS_SPLAT != 0 {
+            // We guarded above that when there is a splat and a rest,
+            // the number of arguments lines up.
+            // So we just dup the splat array and push it onto the stack.
+ let array = ctx.stack_pop(1);
+ let array = asm.ccall(
+ rb_ary_dup as *const u8,
+ vec![array],
+ );
+ let stack_ret = ctx.stack_push(Type::TArray);
+ asm.mov(stack_ret, array);
+
} else {
- asm.comment("load pointer to array elts");
- let offset_magnitude = SIZEOF_VALUE as u32 * n;
- let values_opnd = ctx.sp_opnd(-(offset_magnitude as isize));
- asm.lea(values_opnd)
- };
+ assert!(argc >= required_num);
+ let n = (argc - required_num) as u32;
+ argc = required_num + 1;
+ // If n is 0, then elts is never going to be read, so we can just pass null
+ let values_ptr = if n == 0 {
+ Opnd::UImm(0)
+ } else {
+ asm.comment("load pointer to array elts");
+ let offset_magnitude = SIZEOF_VALUE as u32 * n;
+ let values_opnd = ctx.sp_opnd(-(offset_magnitude as isize));
+ asm.lea(values_opnd)
+ };
- let new_ary = asm.ccall(
- rb_ec_ary_new_from_values as *const u8,
- vec![
- EC,
- Opnd::UImm(n.into()),
- values_ptr
- ]
- );
+ let new_ary = asm.ccall(
+ rb_ec_ary_new_from_values as *const u8,
+ vec![
+ EC,
+ Opnd::UImm(n.into()),
+ values_ptr
+ ]
+ );
- ctx.stack_pop(n.as_usize());
- let stack_ret = ctx.stack_push(Type::CArray);
- asm.mov(stack_ret, new_ary);
+ ctx.stack_pop(n.as_usize());
+ let stack_ret = ctx.stack_push(Type::CArray);
+ asm.mov(stack_ret, new_ary);
+ }
}
// Points to the receiver operand on the stack unless a captured environment is used
diff --git a/yjit/src/cruby_bindings.inc.rs b/yjit/src/cruby_bindings.inc.rs
index b366950e7d..21a6d09e84 100644
--- a/yjit/src/cruby_bindings.inc.rs
+++ b/yjit/src/cruby_bindings.inc.rs
@@ -1095,6 +1095,7 @@ extern "C" {
pub fn rb_obj_class(obj: VALUE) -> VALUE;
pub fn rb_ary_new_capa(capa: ::std::os::raw::c_long) -> VALUE;
pub fn rb_ary_store(ary: VALUE, key: ::std::os::raw::c_long, val: VALUE);
+ pub fn rb_ary_dup(ary: VALUE) -> VALUE;
pub fn rb_ary_resurrect(ary: VALUE) -> VALUE;
pub fn rb_ary_clear(ary: VALUE) -> VALUE;
pub fn rb_hash_new() -> VALUE;
diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs
index 2587042d79..4a13997f74 100644
--- a/yjit/src/stats.rs
+++ b/yjit/src/stats.rs
@@ -253,7 +253,7 @@ make_counters! {
send_send_getter,
send_send_builtin,
send_iseq_has_rest_and_captured,
- send_iseq_has_rest_and_splat,
+ send_iseq_has_rest_and_splat_not_equal,
send_iseq_has_rest_and_send,
send_iseq_has_rest_and_block,
send_iseq_has_rest_and_kw,