Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/ftrace.c                | 20
-rw-r--r-- | kernel/trace/kprobe_event_gen_test.c |  4
-rw-r--r-- | kernel/trace/ring_buffer.c           | 17
-rw-r--r-- | kernel/trace/trace.c                 | 30
-rw-r--r-- | kernel/trace/trace_events_hist.c     | 12
-rw-r--r-- | kernel/trace/trace_events_synth.c    | 19
-rw-r--r-- | kernel/trace/trace_hwlat.c           | 11
-rw-r--r-- | kernel/trace/trace_osnoise.c         | 14
8 files changed, 84 insertions, 43 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 29baa97d0d53..c67bcc89a771 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1564,7 +1564,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
 	key.flags = end;	/* overload flags, as it is unsigned long */

 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
-		if (end < pg->records[0].ip ||
+		if (pg->index == 0 ||
+		    end < pg->records[0].ip ||
 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
 			continue;
 		rec = bsearch(&key, pg->records, pg->index,
@@ -2591,7 +2592,7 @@ static void call_direct_funcs(unsigned long ip, unsigned long pip,
 	arch_ftrace_set_direct_caller(fregs, addr);
 }

-struct ftrace_ops direct_ops = {
+static struct ftrace_ops direct_ops = {
 	.func = call_direct_funcs,
 	.flags = FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
 		 | FTRACE_OPS_FL_PERMANENT,
@@ -5666,12 +5667,15 @@ int modify_ftrace_direct(unsigned long ip,
 		ret = 0;
 	}

-	if (unlikely(ret && new_direct)) {
-		direct->count++;
-		list_del_rcu(&new_direct->next);
-		synchronize_rcu_tasks();
-		kfree(new_direct);
-		ftrace_direct_func_count--;
+	if (ret) {
+		direct->addr = old_addr;
+		if (unlikely(new_direct)) {
+			direct->count++;
+			list_del_rcu(&new_direct->next);
+			synchronize_rcu_tasks();
+			kfree(new_direct);
+			ftrace_direct_func_count--;
+		}
 	}

  out_unlock:
diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
index 4850fdfe27f1..5a4b722b5045 100644
--- a/kernel/trace/kprobe_event_gen_test.c
+++ b/kernel/trace/kprobe_event_gen_test.c
@@ -146,7 +146,7 @@ static int __init test_gen_kprobe_cmd(void)
 		if (trace_event_file_is_valid(gen_kprobe_test))
 			gen_kprobe_test = NULL;
 		/* We got an error after creating the event, delete it */
-		ret = kprobe_event_delete("gen_kprobe_test");
+		kprobe_event_delete("gen_kprobe_test");
 		goto out;
 	}

@@ -211,7 +211,7 @@ static int __init test_gen_kretprobe_cmd(void)
 		if (trace_event_file_is_valid(gen_kretprobe_test))
 			gen_kretprobe_test = NULL;
 		/* We got an error after creating the event, delete it */
-		ret = kprobe_event_delete("gen_kretprobe_test");
+		kprobe_event_delete("gen_kretprobe_test");
 		goto out;
 	}

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index af50d931b020..76a2d91eecad 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -354,10 +354,6 @@ static void rb_init_page(struct buffer_data_page *bpage)
 	local_set(&bpage->commit, 0);
 }

-/*
- * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
- * this issue out.
- */
 static void free_buffer_page(struct buffer_page *bpage)
 {
 	free_page((unsigned long)bpage->page);
@@ -3102,6 +3098,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 		if (RB_WARN_ON(cpu_buffer,
 			       rb_is_reader_page(cpu_buffer->tail_page)))
 			return;
+		/*
+		 * No need for a memory barrier here, as the update
+		 * of the tail_page did it for this page.
+		 */
 		local_set(&cpu_buffer->commit_page->page->commit,
 			  rb_page_write(cpu_buffer->commit_page));
 		rb_inc_page(&cpu_buffer->commit_page);
@@ -3111,6 +3111,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	while (rb_commit_index(cpu_buffer) !=
 	       rb_page_write(cpu_buffer->commit_page)) {

+		/* Make sure the readers see the content of what is committed. */
+		smp_wmb();
 		local_set(&cpu_buffer->commit_page->page->commit,
 			  rb_page_write(cpu_buffer->commit_page));
 		RB_WARN_ON(cpu_buffer,
@@ -4688,7 +4690,12 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)

 	/*
 	 * Make sure we see any padding after the write update
-	 * (see rb_reset_tail())
+	 * (see rb_reset_tail()).
+	 *
+	 * In addition, a writer may be writing on the reader page
+	 * if the page has not been fully filled, so the read barrier
+	 * is also needed to make sure we see the content of what is
+	 * committed by the writer (see rb_set_commit_to_write()).
 	 */
 	smp_rmb();

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45551c7b4c36..36a6037823cd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1149,22 +1149,22 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
 	unsigned long flags;

 	if (in_nmi()) {
-		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
-		internal_trace_puts("*** snapshot is being ignored ***\n");
+		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+		trace_array_puts(tr, "*** snapshot is being ignored ***\n");
 		return;
 	}

 	if (!tr->allocated_snapshot) {
-		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
-		internal_trace_puts("*** stopping trace here! ***\n");
-		tracing_off();
+		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
+		trace_array_puts(tr, "*** stopping trace here! ***\n");
+		tracer_tracing_off(tr);
 		return;
 	}

 	/* Note, snapshot can not be used when the tracer uses it */
 	if (tracer->use_max_tr) {
-		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
-		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
+		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
+		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
 		return;
 	}

@@ -5167,6 +5167,8 @@ loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
 static const struct file_operations tracing_fops = {
 	.open = tracing_open,
 	.read = seq_read,
+	.read_iter = seq_read_iter,
+	.splice_read = generic_file_splice_read,
 	.write = tracing_write_stub,
 	.llseek = tracing_lseek,
 	.release = tracing_release,
@@ -9514,6 +9516,7 @@ static int __remove_instance(struct trace_array *tr)
 	tracefs_remove(tr->dir);
 	free_percpu(tr->last_func_repeats);
 	free_trace_buffers(tr);
+	clear_tracing_err_log(tr);

 	for (i = 0; i < tr->nr_topts; i++) {
 		kfree(tr->topts[i].topts);
@@ -10391,19 +10394,20 @@ out:

 void __init ftrace_boot_snapshot(void)
 {
+#ifdef CONFIG_TRACER_MAX_TRACE
 	struct trace_array *tr;

-	if (snapshot_at_boot) {
-		tracing_snapshot();
-		internal_trace_puts("** Boot snapshot taken **\n");
-	}
+	if (!snapshot_at_boot)
+		return;

 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-		if (tr == &global_trace)
+		if (!tr->allocated_snapshot)
 			continue;
-		trace_array_puts(tr, "** Boot snapshot taken **\n");
+
 		tracing_snapshot_instance(tr);
+		trace_array_puts(tr, "** Boot snapshot taken **\n");
 	}
+#endif
 }

 void __init early_trace_init(void)
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 89877a18f933..486cca3c2b75 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1331,6 +1331,9 @@ static const char *hist_field_name(struct hist_field *field,
 {
 	const char *field_name = "";

+	if (WARN_ON_ONCE(!field))
+		return field_name;
+
 	if (level > 1)
 		return field_name;

@@ -4235,6 +4238,15 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
 		goto out;
 	}

+	/* Some types cannot be a value */
+	if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+				 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
+				 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
+				 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
+		hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
+		ret = -EINVAL;
+	}
+
 	hist_data->fields[val_idx] = hist_field;

 	++hist_data->n_vals;
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 46d0abb32d0f..d6a70aff2410 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -44,14 +44,21 @@ enum { ERRORS };

 static const char *err_text[] = { ERRORS };

+static DEFINE_MUTEX(lastcmd_mutex);
 static char *last_cmd;

 static int errpos(const char *str)
 {
+	int ret = 0;
+
+	mutex_lock(&lastcmd_mutex);
 	if (!str || !last_cmd)
-		return 0;
+		goto out;

-	return err_pos(last_cmd, str);
+	ret = err_pos(last_cmd, str);
+ out:
+	mutex_unlock(&lastcmd_mutex);
+	return ret;
 }

 static void last_cmd_set(const char *str)
@@ -59,18 +66,22 @@ static void last_cmd_set(const char *str)
 	if (!str)
 		return;

+	mutex_lock(&lastcmd_mutex);
 	kfree(last_cmd);
-
 	last_cmd = kstrdup(str, GFP_KERNEL);
+	mutex_unlock(&lastcmd_mutex);
 }

 static void synth_err(u8 err_type, u16 err_pos)
 {
+	mutex_lock(&lastcmd_mutex);
 	if (!last_cmd)
-		return;
+		goto out;

 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
 			err_type, err_pos);
+ out:
+	mutex_unlock(&lastcmd_mutex);
 }

 static int create_synth_event(const char *raw_command);
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index d440ddd5fd8b..2f37a6e68aa9 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -339,7 +339,7 @@ static void move_to_next_cpu(void)
 	cpumask_clear(current_mask);
 	cpumask_set_cpu(next_cpu, current_mask);

-	sched_setaffinity(0, current_mask);
+	set_cpus_allowed_ptr(current, current_mask);
 	return;

 change_mode:
@@ -446,7 +446,7 @@ static int start_single_kthread(struct trace_array *tr)

 	}

-	sched_setaffinity(kthread->pid, current_mask);
+	set_cpus_allowed_ptr(kthread, current_mask);

 	kdata->kthread = kthread;
 	wake_up_process(kthread);
@@ -492,6 +492,10 @@ static int start_cpu_kthread(unsigned int cpu)
 {
 	struct task_struct *kthread;

+	/* Do not start a new hwlatd thread if it is already running */
+	if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
+		return 0;
+
 	kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
 	if (IS_ERR(kthread)) {
 		pr_err(BANNER "could not start sampling thread\n");
@@ -584,9 +588,6 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
 	 */
 	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);

-	for_each_online_cpu(cpu)
-		per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
-
 	for_each_cpu(cpu, current_mask) {
 		retval = start_cpu_kthread(cpu);
 		if (retval)
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index 04f0fdae19a1..4496975f2029 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -217,7 +217,7 @@ struct osnoise_variables {
 /*
  * Per-cpu runtime information.
  */
-DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
+static DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);

 /*
  * this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU
@@ -240,7 +240,7 @@ struct timerlat_variables {
 	u64	count;
 };

-DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
+static DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);

 /*
  * this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU
@@ -332,7 +332,7 @@ struct timerlat_sample {
 /*
  * Protect the interface.
  */
-struct mutex interface_lock;
+static struct mutex interface_lock;

 /*
  * Tracer data.
@@ -1296,7 +1296,7 @@ static void notify_new_max_latency(u64 latency)
 	rcu_read_lock();
 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
 		tr = inst->tr;
-		if (tr->max_latency < latency) {
+		if (tracer_tracing_is_on(tr) && tr->max_latency < latency) {
 			tr->max_latency = latency;
 			latency_fsnotify(tr);
 		}
@@ -1738,6 +1738,8 @@ static int timerlat_main(void *data)

 		trace_timerlat_sample(&s);

+		notify_new_max_latency(diff);
+
 		timerlat_dump_stack(time_to_us(diff));

 		tlat->tracing_thread = false;
@@ -2239,8 +2241,8 @@ static struct trace_min_max_param osnoise_print_stack = {
 /*
  * osnoise/timerlat_period: min 100 us, max 1 s
  */
-u64 timerlat_min_period = 100;
-u64 timerlat_max_period = 1000000;
+static u64 timerlat_min_period = 100;
+static u64 timerlat_max_period = 1000000;
 static struct trace_min_max_param timerlat_period = {
 	.lock = &interface_lock,
 	.val = &osnoise_data.timerlat_period,