author    Ian Ker-Seymer <hello@ianks.com>  2023-01-12 10:14:17 -0500
committer GitHub <noreply@github.com>       2023-01-12 10:14:17 -0500
commit    8d3ff663899211c9c0ca2a8cf7d994e7acd3f83e (patch)
tree      e5adcd4caac67e4e570183b13a17d49b4821d884
parent    bfc887f391fde6de9d088039509f6e3eaa40b3ca (diff)
download  ruby-8d3ff663899211c9c0ca2a8cf7d994e7acd3f83e.tar.gz
Enable `clippy` checks for yjit in CI (#7093)
* Add job to check clippy lints in CI
* Address all remaining clippy lints
* Check lints on arm64 as well
* Apply latest clippy lints
* Do not exit 0 on clippy warnings
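Note: the check this commit adds can be reproduced locally with the same invocation the CI scripts below use, `cd yjit && cargo clippy --all-targets --all-features`. How warnings are promoted to a failing exit status is not visible in this diff; passing `-- -D warnings` to clippy is the usual mechanism, mentioned here only as context.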
-rw-r--r--  .cirrus.yml                          1
-rw-r--r--  .github/workflows/yjit-ubuntu.yml    9
-rw-r--r--  yjit/src/asm/mod.rs                  2
-rw-r--r--  yjit/src/codegen.rs                 44
-rw-r--r--  yjit/src/core.rs                     6
-rw-r--r--  yjit/src/cruby.rs                    2
-rw-r--r--  yjit/src/invariants.rs               4
7 files changed, 39 insertions, 29 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 6c47159921..20c14f375c 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -131,3 +131,4 @@ yjit_task:
make_test_script: source $HOME/.cargo/env && make test RUN_OPTS="--yjit-call-threshold=1 --yjit-verify-ctx"
make_test_all_script: source $HOME/.cargo/env && make test-all RUN_OPTS="--yjit-call-threshold=1 --yjit-verify-ctx" TESTOPTS="$RUBY_TESTOPTS"
make_test_spec_script: source $HOME/.cargo/env && make test-spec RUN_OPTS="--yjit-call-threshold=1 --yjit-verify-ctx"
+ clippy_script: source $HOME/.cargo/env && cd yjit && cargo clippy --all-targets --all-features
diff --git a/.github/workflows/yjit-ubuntu.yml b/.github/workflows/yjit-ubuntu.yml
index 7226e3cbfe..fb12eb80b3 100644
--- a/.github/workflows/yjit-ubuntu.yml
+++ b/.github/workflows/yjit-ubuntu.yml
@@ -40,6 +40,15 @@ jobs:
# Check that we can build in release mode too
- run: cargo build --release
working-directory: yjit
+ lint:
+ name: Rust lint
+ # GitHub Action's image seems to already contain a Rust 1.58.0.
+ runs-on: ubuntu-20.04
+ steps:
+ - uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
+ # Check that we don't have linting errors in release mode, too
+ - run: cargo clippy --all-targets --all-features
+ working-directory: yjit
make:
strategy:
fail-fast: false
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
index a5ef8dcf4f..648041bbab 100644
--- a/yjit/src/asm/mod.rs
+++ b/yjit/src/asm/mod.rs
@@ -283,6 +283,7 @@ impl CodeBlock {
/// Return the address ranges of a given address range that this CodeBlock can write.
#[cfg(any(feature = "disasm", target_arch = "aarch64"))]
+ #[allow(dead_code)]
pub fn writable_addrs(&self, start_ptr: CodePtr, end_ptr: CodePtr) -> Vec<(usize, usize)> {
// CodegenGlobals is not initialized when we write initial ocb code
let freed_pages = if CodegenGlobals::has_instance() {
@@ -356,6 +357,7 @@ impl CodeBlock {
self.asm_comments.get(&pos)
}
+ #[allow(unused_variables)]
#[cfg(feature = "disasm")]
pub fn remove_comments(&mut self, start_addr: CodePtr, end_addr: CodePtr) {
for addr in start_addr.into_usize()..end_addr.into_usize() {
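An aside on the two `#[allow]` attributes added above (editorial interpretation, not part of the commit): an item that is only reachable under some cfg/feature combinations can end up compiled but never called under others, which rustc and clippy then report. A minimal sketch with a hypothetical item:

    // Hypothetical function, for illustration only: compiled when the
    // feature is on, but in some configurations nothing calls it, so the
    // dead_code lint fires unless the item opts out locally.
    #[cfg(feature = "disasm")]
    #[allow(dead_code)]
    pub fn writable_addrs_example() -> Vec<(usize, usize)> {
        Vec::new()
    }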
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index b4c6d7c941..b8dff32c17 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -1518,7 +1518,7 @@ fn gen_get_ep(asm: &mut Assembler, level: u32) -> Opnd {
// Get the previous EP from the current EP
// See GET_PREV_EP(ep) macro
// VALUE *prev_ep = ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))
- let offs = (SIZEOF_VALUE as i32) * (VM_ENV_DATA_INDEX_SPECVAL as i32);
+ let offs = (SIZEOF_VALUE as i32) * VM_ENV_DATA_INDEX_SPECVAL;
ep_opnd = asm.load(Opnd::mem(64, ep_opnd, offs));
ep_opnd = asm.and(ep_opnd, Opnd::Imm(!0x03));
}
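Two things meet in the hunk above: the clippy fix itself (dropping a cast that is evidently redundant because `VM_ENV_DATA_INDEX_SPECVAL` is already an `i32`), and, in the surrounding context, the GET_PREV_EP pattern of stashing flag bits in the low bits of a pointer-sized word. A standalone sketch of that masking, with hypothetical types:

    // Hypothetical stand-in for an EP specval slot: the parent pointer
    // lives in the upper bits and the low two bits carry flags, so masking
    // with !0x03 recovers the pointer, as the GET_PREV_EP comment says.
    fn prev_ep(specval: usize) -> *const usize {
        (specval & !0x03) as *const usize
    }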
@@ -1922,7 +1922,7 @@ fn gen_set_ivar(
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc as i32, ctx);
+ handle_opt_send_shift_stack(asm, argc, ctx);
}
// Save the PC and SP because the callee may allocate
@@ -2059,7 +2059,7 @@ fn gen_get_ivar(
asm.comment("guard shape");
asm.cmp(shape_opnd, Opnd::UImm(expected_shape as u64));
- let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic).into();
+ let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
jit_chain_guard(
JCC_JNE,
jit,
@@ -2280,7 +2280,7 @@ fn gen_setinstancevariable(
asm.comment("guard shape");
asm.cmp(shape_opnd, Opnd::UImm(expected_shape as u64));
- let megamorphic_side_exit = counted_exit!(ocb, side_exit, setivar_megamorphic).into();
+ let megamorphic_side_exit = counted_exit!(ocb, side_exit, setivar_megamorphic);
jit_chain_guard(
JCC_JNE,
jit,
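The two `.into()` removals above look like clippy's `useless_conversion` lint (an inference from the shape of the change, not stated in the commit): converting a value into the type it already has. A minimal standalone illustration:

    fn takes_u64(x: u64) -> u64 { x }

    fn main() {
        let v: u64 = 42;
        // clippy::useless_conversion: v is already a u64, so .into() is a no-op.
        let _ = takes_u64(v.into());
    }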
@@ -2318,10 +2318,10 @@ fn gen_setinstancevariable(
None
};
- let dest_shape = if capa_shape.is_none() {
- unsafe { rb_shape_get_next(shape, comptime_receiver, ivar_name) }
+ let dest_shape = if let Some(capa_shape) = capa_shape {
+ unsafe { rb_shape_get_next(capa_shape, comptime_receiver, ivar_name) }
} else {
- unsafe { rb_shape_get_next(capa_shape.unwrap(), comptime_receiver, ivar_name) }
+ unsafe { rb_shape_get_next(shape, comptime_receiver, ivar_name) }
};
let new_shape_id = unsafe { rb_shape_id(dest_shape) };
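The rewrite above is the standard fix for clippy's `unnecessary_unwrap` suggestion (again inferred from the change): instead of testing `is_none()` and calling `unwrap()` in the other branch, bind the inner value directly. A self-contained sketch:

    fn demo(capa: Option<u32>, fallback: u32) -> u32 {
        // Before: if capa.is_none() { fallback } else { capa.unwrap() + 1 }
        // After, mirroring the hunk above:
        if let Some(capa) = capa { capa + 1 } else { fallback }
    }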
@@ -3490,7 +3490,7 @@ fn gen_opt_case_dispatch(
&starting_context,
asm,
ocb,
- CASE_WHEN_MAX_DEPTH as i32,
+ CASE_WHEN_MAX_DEPTH,
side_exit,
);
@@ -4444,7 +4444,7 @@ fn gen_push_frame(
SpecVal::BlockParamProxy => {
let ep_opnd = gen_get_lep(jit, asm);
let block_handler = asm.load(
- Opnd::mem(64, ep_opnd, (SIZEOF_VALUE as i32) * (VM_ENV_DATA_INDEX_SPECVAL as i32))
+ Opnd::mem(64, ep_opnd, (SIZEOF_VALUE as i32) * VM_ENV_DATA_INDEX_SPECVAL)
);
asm.store(Opnd::mem(64, CFP, RUBY_OFFSET_CFP_BLOCK_CODE), block_handler);
@@ -4656,7 +4656,7 @@ fn gen_send_cfunc(
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc as i32, ctx);
+ handle_opt_send_shift_stack(asm, argc, ctx);
}
// Points to the receiver operand on the stack
@@ -4722,7 +4722,7 @@ fn gen_send_cfunc(
// Copy the arguments from the stack to the C argument registers
// self is the 0th argument and is at index argc from the stack top
(0..=passed_argc).map(|i|
- Opnd::mem(64, sp, -(argc + 1 - (i as i32)) * SIZEOF_VALUE_I32)
+ Opnd::mem(64, sp, -(argc + 1 - i) * SIZEOF_VALUE_I32)
).collect()
}
// Variadic method
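The mapping above computes, for each C argument (with self as argument 0, sitting at index argc from the stack top), its memory operand relative to sp. A worked version of the offset arithmetic, assuming the usual 8-byte VALUE on a 64-bit build:

    const SIZEOF_VALUE: i32 = 8; // assumption: 64-bit build

    // Offset of argument i (i = 0 is self) below sp, as in the hunk above.
    fn arg_offset(argc: i32, i: i32) -> i32 {
        -(argc + 1 - i) * SIZEOF_VALUE
    }

    fn main() {
        // With argc = 2: self at sp-24, then sp-16, last argument at sp-8.
        assert_eq!(arg_offset(2, 0), -24);
        assert_eq!(arg_offset(2, 1), -16);
        assert_eq!(arg_offset(2, 2), -8);
    }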
@@ -4849,7 +4849,7 @@ fn push_splat_args(required_args: i32, ctx: &mut Context, asm: &mut Assembler, o
let ary_opnd = asm.csel_nz(ary_opnd, heap_ptr_opnd);
- for i in 0..required_args as i32 {
+ for i in 0..required_args {
let top = ctx.stack_push(Type::Unknown);
asm.mov(top, Opnd::mem(64, ary_opnd, i * (SIZEOF_VALUE as i32)));
}
@@ -5237,7 +5237,7 @@ fn gen_send_iseq(
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc as i32, ctx);
+ handle_opt_send_shift_stack(asm, argc, ctx);
}
if doing_kw_call {
@@ -5530,7 +5530,7 @@ fn gen_struct_aref(
// This is a .send call and we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc as i32, ctx);
+ handle_opt_send_shift_stack(asm, argc, ctx);
}
// All structs from the same Struct class should have the same
@@ -5920,7 +5920,7 @@ fn gen_send_general(
&starting_context,
asm,
ocb,
- SEND_MAX_CHAIN_DEPTH as i32,
+ SEND_MAX_CHAIN_DEPTH,
chain_exit,
);
@@ -5950,7 +5950,7 @@ fn gen_send_general(
// If this is a .send call we need to adjust the stack
if flags & VM_CALL_OPT_SEND != 0 {
- handle_opt_send_shift_stack(asm, argc as i32, ctx);
+ handle_opt_send_shift_stack(asm, argc, ctx);
}
// About to reset the SP, need to load this here
@@ -6110,7 +6110,7 @@ fn gen_invokeblock(
asm.comment("get local EP");
let ep_opnd = gen_get_lep(jit, asm);
let block_handler_opnd = asm.load(
- Opnd::mem(64, ep_opnd, (SIZEOF_VALUE as i32) * (VM_ENV_DATA_INDEX_SPECVAL as i32))
+ Opnd::mem(64, ep_opnd, (SIZEOF_VALUE as i32) * VM_ENV_DATA_INDEX_SPECVAL)
);
asm.comment("guard block_handler type");
@@ -6272,7 +6272,7 @@ fn gen_invokesuper(
let ep_me_opnd = Opnd::mem(
64,
ep_opnd,
- (SIZEOF_VALUE as i32) * (VM_ENV_DATA_INDEX_ME_CREF as i32),
+ (SIZEOF_VALUE as i32) * VM_ENV_DATA_INDEX_ME_CREF,
);
asm.cmp(ep_me_opnd, me_as_value.into());
asm.jne(counted_exit!(ocb, side_exit, invokesuper_me_changed).into());
@@ -6290,7 +6290,7 @@ fn gen_invokesuper(
let ep_specval_opnd = Opnd::mem(
64,
ep_opnd,
- (SIZEOF_VALUE as i32) * (VM_ENV_DATA_INDEX_SPECVAL as i32),
+ (SIZEOF_VALUE as i32) * VM_ENV_DATA_INDEX_SPECVAL,
);
asm.cmp(ep_specval_opnd, VM_BLOCK_HANDLER_NONE.into());
asm.jne(counted_exit!(ocb, side_exit, invokesuper_block).into());
@@ -6354,7 +6354,7 @@ fn gen_leave(
// Jump to the JIT return address on the frame that was just popped
let offset_to_jit_return =
- -(RUBY_SIZEOF_CONTROL_FRAME as i32) + (RUBY_OFFSET_CFP_JIT_RETURN as i32);
+ -(RUBY_SIZEOF_CONTROL_FRAME as i32) + RUBY_OFFSET_CFP_JIT_RETURN;
asm.jmp_opnd(Opnd::mem(64, CFP, offset_to_jit_return));
EndBlock
@@ -6815,7 +6815,7 @@ fn gen_getblockparamproxy(
// Load the block handler for the current frame
// note, VM_ASSERT(VM_ENV_LOCAL_P(ep))
let block_handler = asm.load(
- Opnd::mem(64, ep_opnd, (SIZEOF_VALUE as i32) * (VM_ENV_DATA_INDEX_SPECVAL as i32))
+ Opnd::mem(64, ep_opnd, (SIZEOF_VALUE as i32) * VM_ENV_DATA_INDEX_SPECVAL)
);
// Specialize compilation for the case where no block handler is present
@@ -6918,7 +6918,7 @@ fn gen_getblockparam(
Opnd::mem(
64,
ep_opnd,
- (SIZEOF_VALUE as i32) * (VM_ENV_DATA_INDEX_SPECVAL as i32),
+ (SIZEOF_VALUE as i32) * VM_ENV_DATA_INDEX_SPECVAL,
),
]
);
diff --git a/yjit/src/core.rs b/yjit/src/core.rs
index 9a796bcb87..c3e2bd8c6c 100644
--- a/yjit/src/core.rs
+++ b/yjit/src/core.rs
@@ -543,7 +543,8 @@ pub fn get_or_create_iseq_payload(iseq: IseqPtr) -> &'static mut IseqPayload {
// We drop the payload with Box::from_raw when the GC frees the iseq and calls us.
// NOTE(alan): Sometimes we read from an iseq without ever writing to it.
// We allocate in those cases anyways.
- let new_payload = Box::into_raw(Box::new(IseqPayload::default()));
+ let new_payload = IseqPayload::default();
+ let new_payload = Box::into_raw(Box::new(new_payload));
rb_iseq_set_yjit_payload(iseq, new_payload as VoidPtr);
new_payload
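The comment above describes the ownership handoff this hunk restructures: the payload is leaked to the C side as a raw pointer and reclaimed with `Box::from_raw` when the GC frees the iseq. A self-contained sketch of that round trip, with a hypothetical `Payload` type:

    #[derive(Default)]
    struct Payload(u64);

    fn main() {
        // Leak the Box into a raw pointer, as when handing the payload to C.
        let raw: *mut Payload = Box::into_raw(Box::new(Payload::default()));
        // ... the pointer lives in C-owned data for the iseq's lifetime ...
        // Rebuild the Box so Rust frees it exactly once, as on iseq free.
        let payload = unsafe { Box::from_raw(raw) };
        println!("dropping payload with counter {}", payload.0);
    }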
@@ -1183,7 +1184,6 @@ impl Context {
match opnd {
SelfOpnd => self.self_type,
StackOpnd(idx) => {
- let idx = idx as u16;
assert!(idx < self.stack_size);
let stack_idx: usize = (self.stack_size - 1 - idx).into();
@@ -1224,7 +1224,6 @@ impl Context {
match opnd {
SelfOpnd => self.self_type.upgrade(opnd_type),
StackOpnd(idx) => {
- let idx = idx as u16;
assert!(idx < self.stack_size);
let stack_idx = (self.stack_size - 1 - idx) as usize;
@@ -1259,7 +1258,6 @@ impl Context {
match opnd {
SelfOpnd => (MapToSelf, opnd_type),
StackOpnd(idx) => {
- let idx = idx as u16;
assert!(idx < self.stack_size);
let stack_idx = (self.stack_size - 1 - idx) as usize;
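For context on the indexing these hunks touch (the removed `let idx = idx as u16;` lines were redundant because, judging by the change, `idx` already arrives as a `u16`): `StackOpnd(idx)` counts down from the top of the temp stack, so the slot is recovered as `stack_size - 1 - idx`. A tiny worked version:

    fn stack_slot(stack_size: u16, idx: u16) -> usize {
        assert!(idx < stack_size);
        // With stack_size = 4: StackOpnd(0) -> slot 3 (top), StackOpnd(3) -> slot 0.
        (stack_size - 1 - idx).into()
    }

    fn main() {
        assert_eq!(stack_slot(4, 0), 3);
        assert_eq!(stack_slot(4, 3), 0);
    }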
diff --git a/yjit/src/cruby.rs b/yjit/src/cruby.rs
index ba09d4119f..ee6fa14031 100644
--- a/yjit/src/cruby.rs
+++ b/yjit/src/cruby.rs
@@ -450,7 +450,7 @@ impl VALUE {
pub fn as_usize(self) -> usize {
let VALUE(us) = self;
- us as usize
+ us
}
pub fn as_ptr<T>(self) -> *const T {
diff --git a/yjit/src/invariants.rs b/yjit/src/invariants.rs
index 6ad87b9d85..734b32c464 100644
--- a/yjit/src/invariants.rs
+++ b/yjit/src/invariants.rs
@@ -131,7 +131,7 @@ pub fn assume_method_lookup_stable(
.cme_validity
.entry(callee_cme)
.or_default()
- .insert(block.clone());
+ .insert(block);
}
// Checks rb_method_basic_definition_p and registers the current block for invalidation if method
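Dropping the `.clone()` above reads like clippy's `clone_on_copy` (if the block handle is `Copy`) or `redundant_clone` (if this was its last use); which lint fired is not visible here. A standalone illustration of `clone_on_copy`, with a hypothetical handle type:

    use std::collections::HashSet;

    // Hypothetical Copy handle standing in for the block reference.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct BlockRef(usize);

    fn remember(seen: &mut HashSet<BlockRef>, block: BlockRef) {
        // clippy::clone_on_copy: .clone() on a Copy type is redundant.
        seen.insert(block);
    }

    fn main() {
        let mut seen = HashSet::new();
        remember(&mut seen, BlockRef(0x1234));
        assert_eq!(seen.len(), 1);
    }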
@@ -434,7 +434,7 @@ pub extern "C" fn rb_yjit_constant_ic_update(iseq: *const rb_iseq_t, ic: IC, ins
// This should come from a running iseq, so direct threading translation
// should have been done
- assert!(unsafe { FL_TEST(iseq.into(), VALUE(ISEQ_TRANSLATED as usize)) } != VALUE(0));
+ assert!(unsafe { FL_TEST(iseq.into(), VALUE(ISEQ_TRANSLATED)) } != VALUE(0));
assert!(insn_idx < unsafe { get_iseq_encoded_size(iseq) });
// Ensure that the instruction the insn_idx is pointing to is in