From a82a24ed575c5f97b3755317f1a3bd6c9149a67e Mon Sep 17 00:00:00 2001 From: Jeremy Evans Date: Sun, 2 Apr 2023 11:28:01 -0700 Subject: Optimize method_missing calls CALLER_ARG_SPLAT is not necessary for method_missing. We just need to unshift the method name into the arguments. This optimizes all method_missing calls: * mm(recv) ~9% * mm(recv, *args) ~215% for args.length == 200 * mm(recv, *args, **kw) ~55% for args.length == 200 * mm(recv, **kw) ~22% * mm(recv, kw: 1) ~100% Note that empty argument splats do get slower with this approach, by about 30-40%. Other than empty argument splats, argument splats are faster, with the speedup depending on the number of arguments. --- benchmark/vm_call_method_missing.yml | 62 ++++++++++++++++++++++++++++++++++++ vm_insnhelper.c | 28 ++++++---------- 2 files changed, 71 insertions(+), 19 deletions(-) create mode 100644 benchmark/vm_call_method_missing.yml diff --git a/benchmark/vm_call_method_missing.yml b/benchmark/vm_call_method_missing.yml new file mode 100644 index 0000000000..f890796f11 --- /dev/null +++ b/benchmark/vm_call_method_missing.yml @@ -0,0 +1,62 @@ +prelude: | + class A0 + def method_missing(m); m end + end + class A1 + def method_missing(m, a) a; end + end + class S + def method_missing(m, *a) a; end + end + class B + def method_missing(m, kw: 1) kw end + end + class SB + def method_missing(m, *a, kw: 1) kw end + end + + t0 = 0.times.to_a + t1 = 1.times.to_a + t10 = 10.times.to_a + t200 = 200.times.to_a + kw = {kw: 2} + + a0 = A0.new + a1 = A1.new + s = S.new + b = B.new + sb = SB.new +benchmark: + method_missing_simple_0: | + a0.() + method_missing_simple_1: | + a1.x(1) + method_missing_simple_0_splat: | + a0.(*t0) + method_missing_simple_1_splat: | + a1.(*t1) + method_missing_no_splat: | + s.() + method_missing_0_splat: | + s.(*t0) + method_missing_1_splat: | + s.(*t1) + method_missing_10_splat: | + s.(*t10) + method_missing_200_splat: | + s.(*t200) + method_missing_kw: | + b.(kw: 1) + 
method_missing_no_kw: | + b.() + method_missing_kw_splat: | + b.(**kw) + method_missing_0_splat_kw: | + sb.(*t0, **kw) + method_missing_1_splat_kw: | + sb.(*t1, **kw) + method_missing_10_splat_kw: | + sb.(*t10, **kw) + method_missing_200_splat_kw: | + sb.(*t200, **kw) +loop_count: 1000000 diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 362ac7dcb2..917333d248 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -4006,27 +4006,17 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_ VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc); unsigned int argc, flag; - CALLER_SETUP_ARG(reg_cfp, calling, orig_ci, ALLOW_HEAP_ARGV); - if (UNLIKELY(calling->heap_argv)) { - flag = VM_CALL_ARGS_SPLAT | VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0); - argc = 1; - rb_ary_unshift(calling->heap_argv, ID2SYM(vm_ci_mid(orig_ci))); - } - else { - argc = calling->argc + 1; + flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | vm_ci_flag(orig_ci); + argc = ++calling->argc; - flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0); - calling->argc = argc; - - /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */ - CHECK_VM_STACK_OVERFLOW(reg_cfp, 1); - vm_check_canary(ec, reg_cfp->sp); - if (argc > 1) { - MEMMOVE(argv+1, argv, VALUE, argc-1); - } - argv[0] = ID2SYM(vm_ci_mid(orig_ci)); - INC_SP(1); + /* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */ + CHECK_VM_STACK_OVERFLOW(reg_cfp, 1); + vm_check_canary(ec, reg_cfp->sp); + if (argc > 1) { + MEMMOVE(argv+1, argv, VALUE, argc-1); } + argv[0] = ID2SYM(vm_ci_mid(orig_ci)); + INC_SP(1); ec->method_missing_reason = reason; calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci)); -- cgit v1.2.1