Diffstat (limited to 'arch/tile')
35 files changed, 318 insertions, 221 deletions
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 4f3006b600e3..7cca41842a9e 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -128,13 +128,13 @@ config TILEGX
 	select SPARSE_IRQ
 	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_ARCH_KGDB
+	select ARCH_SUPPORTS_ATOMIC_RMW
 
 config TILEPRO
 	def_bool !TILEGX
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 730e40d9cf62..91de7dd7427f 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -170,7 +170,6 @@ CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
 CONFIG_ATA_OVER_ETH=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI_TGT=m
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index 80fc32ed0491..c7702b7ab7a5 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -301,7 +301,6 @@ CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
 CONFIG_ATA_OVER_ETH=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI_TGT=m
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index 5301a9ffbae1..320ff5e6e61e 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -29,6 +29,32 @@
 /* HACK: Avoid pointless "shadow" warnings. */
 #define link link_shadow
 
+/**
+ * strscpy - Copy a C-string into a sized buffer, but only if it fits
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Use this routine to avoid copying too-long strings.
+ * The routine returns the total number of bytes copied
+ * (including the trailing NUL) or zero if the buffer wasn't
+ * big enough.  To ensure that programmers pay attention
+ * to the return code, the destination has a single NUL
+ * written at the front (if size is non-zero) when the
+ * buffer is not big enough.
+ */
+static size_t strscpy(char *dest, const char *src, size_t size)
+{
+	size_t len = strnlen(src, size) + 1;
+	if (len > size) {
+		if (size)
+			dest[0] = '\0';
+		return 0;
+	}
+	memcpy(dest, src, len);
+	return len;
+}
+
 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 {
 	char file[32];
@@ -511,8 +537,8 @@ int gxio_mpipe_link_instance(const char *link_name)
 	if (!context)
 		return GXIO_ERR_NO_DEVICE;
 
-	strncpy(name.name, link_name, sizeof(name.name));
-	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+	if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+		return GXIO_ERR_NO_DEVICE;
 
 	return gxio_mpipe_info_instance_aux(context, name);
 }
@@ -529,7 +555,8 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 
 	rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
 	if (rv >= 0) {
-		strncpy(link_name, name.name, sizeof(name.name));
+		if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+			return GXIO_ERR_INVAL_MEMORY_SIZE;
 		memcpy(link_mac, mac.mac, sizeof(mac.mac));
 	}
 
@@ -545,8 +572,8 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
 	_gxio_mpipe_link_name_t name;
 	int rv;
 
-	strncpy(name.name, link_name, sizeof(name.name));
-	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+	if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+		return GXIO_ERR_NO_DEVICE;
 
 	rv = gxio_mpipe_link_open_aux(context, name, flags);
 	if (rv < 0)
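For illustration, here is a minimal standalone sketch of the contract the strscpy() helper above enforces — a truncated copy is reported as 0 rather than silently leaving a shortened or unterminated name, which is why the callers above can turn it directly into an error return (userspace analogue; the buffer size and link names are hypothetical):

	#include <stdio.h>
	#include <string.h>

	/* Same semantics as the helper above: returns the number of bytes
	 * copied including the trailing NUL, or 0 if the string did not fit
	 * (in which case dest[0] is set to NUL when size is non-zero). */
	static size_t strscpy(char *dest, const char *src, size_t size)
	{
		size_t len = strnlen(src, size) + 1;
		if (len > size) {
			if (size)
				dest[0] = '\0';
			return 0;
		}
		memcpy(dest, src, len);
		return len;
	}

	int main(void)
	{
		char name[8];

		printf("%zu\n", strscpy(name, "xgbe0", sizeof(name)));  /* 6: fits */
		printf("%zu\n", strscpy(name, "overly-long-link-name",
					sizeof(name)));                 /* 0: rejected */
		return 0;
	}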
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0aa5675e7025..e6462b8a6284 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -17,6 +17,7 @@ generic-y += ioctl.h
 generic-y += ioctls.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
+generic-y += irq_work.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index ffd4493efc78..c14e36f008c8 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -267,8 +267,7 @@ static inline int is_compat_task(void)
 	return current_thread_info()->status & TS_COMPAT;
 }
 
-extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka,
-				 siginfo_t *info, sigset_t *set,
+extern int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 				 struct pt_regs *regs);
 
 /* Compat syscalls. */
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 2f572b6b7bc2..44d2765bde2b 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -23,7 +23,7 @@ struct proc_dir_entry;
 #ifdef CONFIG_HARDWALL
 void proc_tile_hardwall_init(struct proc_dir_entry *root);
-int proc_pid_hardwall(struct task_struct *task, char *buffer);
+int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task);
 #else
 static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
 #endif
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 71af5747874d..60d62a292fce 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -140,12 +140,12 @@ extern unsigned int debug_smp_processor_id(void);
 
 /*
  * Read the set of maskable interrupts.
- * We avoid the preemption warning here via __this_cpu_ptr since even
+ * We avoid the preemption warning here via raw_cpu_ptr since even
  * if irqs are already enabled, it's harmless to read the wrong cpu's
  * enabled mask.
  */
 #define arch_local_irqs_enabled() \
-	(*__this_cpu_ptr(&interrupts_enabled_mask))
+	(*raw_cpu_ptr(&interrupts_enabled_mask))
 
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 4734215e2ad4..f67753db1f78 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -84,7 +84,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
 	 * clear any pending DMA interrupts.
 	 */
 	if (current->thread.tile_dma_state.enabled)
-		install_page_table(mm->pgd, __get_cpu_var(current_asid));
+		install_page_table(mm->pgd, __this_cpu_read(current_asid));
 #endif
 }
 
@@ -96,12 +96,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		int cpu = smp_processor_id();
 
 		/* Pick new ASID. */
-		int asid = __get_cpu_var(current_asid) + 1;
+		int asid = __this_cpu_read(current_asid) + 1;
 		if (asid > max_asid) {
 			asid = min_asid;
 			local_flush_tlb();
 		}
-		__get_cpu_var(current_asid) = asid;
+		__this_cpu_write(current_asid, asid);
 
 		/* Clear cpu from the old mm, and set it in the new one. */
 		cpumask_clear_cpu(cpu, mm_cpumask(prev));
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 672768008618..a213a8d84a95 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -39,12 +39,6 @@
 #define HPAGE_MASK	(~(HPAGE_SIZE - 1))
 
 /*
- * We do define AT_SYSINFO_EHDR to support vDSO,
- * but don't use the gate mechanism.
- */
-#define __HAVE_ARCH_GATE_AREA 1
-
-/*
  * If the Kconfig doesn't specify, set a maximum zone order that
  * is enough so that we can create huge pages from small pages given
  * the respective sizes of the two page types. See <linux/mmzone.h>.
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 42323636c459..dd4f9f17e30a 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -266,6 +266,8 @@ static inline void cpu_relax(void)
 	barrier();
 }
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 /* Info on this processor (see fs/proc/cpuinfo.c) */
 struct seq_operations;
 extern const struct seq_operations cpuinfo_op;
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
index 5d5d3b739a6b..86a746243dc8 100644
--- a/arch/tile/include/asm/sections.h
+++ b/arch/tile/include/asm/sections.h
@@ -19,9 +19,6 @@
 
 #include <asm-generic/sections.h>
 
-/* Text and data are at different areas in the kernel VA space. */
-extern char _sinitdata[], _einitdata[];
-
 /* Write-once data is writable only till the end of initialization. */
 extern char __w1data_begin[], __w1data_end[];
diff --git a/arch/tile/include/asm/vdso.h b/arch/tile/include/asm/vdso.h
index 9f6a78d665fa..9b069692153f 100644
--- a/arch/tile/include/asm/vdso.h
+++ b/arch/tile/include/asm/vdso.h
@@ -15,6 +15,7 @@
 #ifndef __TILE_VDSO_H__
 #define __TILE_VDSO_H__
 
+#include <linux/seqlock.h>
 #include <linux/types.h>
 
 /*
@@ -26,15 +27,20 @@
  */
 struct vdso_data {
-	__u64 tz_update_count;  /* Timezone atomicity ctr */
-	__u64 tb_update_count;  /* Timebase atomicity ctr */
-	__u64 xtime_tod_stamp;  /* TOD clock for xtime */
-	__u64 xtime_clock_sec;  /* Kernel time second */
-	__u64 xtime_clock_nsec; /* Kernel time nanosecond */
-	__u64 wtom_clock_sec;   /* Wall to monotonic clock second */
-	__u64 wtom_clock_nsec;  /* Wall to monotonic clock nanosecond */
+	seqcount_t tz_seq;	/* Timezone seqlock */
+	seqcount_t tb_seq;	/* Timebase seqlock */
+	__u64 cycle_last;	/* TOD clock for xtime */
+	__u64 mask;		/* Cycle mask */
 	__u32 mult;		/* Cycle to nanosecond multiplier */
 	__u32 shift;		/* Cycle to nanosecond divisor (power of two) */
+	__u64 wall_time_sec;
+	__u64 wall_time_snsec;
+	__u64 monotonic_time_sec;
+	__u64 monotonic_time_snsec;
+	__u64 wall_time_coarse_sec;
+	__u64 wall_time_coarse_nsec;
+	__u64 monotonic_time_coarse_sec;
+	__u64 monotonic_time_coarse_nsec;
 	__u32 tz_minuteswest;	/* Minutes west of Greenwich */
 	__u32 tz_dsttime;	/* Type of dst correction */
 };
diff --git a/arch/tile/include/uapi/arch/sim_def.h b/arch/tile/include/uapi/arch/sim_def.h
index 4b44a2b6a09a..1c069537ae41 100644
--- a/arch/tile/include/uapi/arch/sim_def.h
+++ b/arch/tile/include/uapi/arch/sim_def.h
@@ -360,19 +360,19 @@
  * @{
  */
 
-/** Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
 #define SIM_CHIP_MEMCTL		0x001
 
-/** Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
 #define SIM_CHIP_XAUI		0x002
 
-/** Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
 #define SIM_CHIP_PCIE		0x004
 
-/** Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
 #define SIM_CHIP_MPIPE		0x008
 
-/** Use with with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
 #define SIM_CHIP_TRIO		0x010
 
 /** Reference all chip devices. */
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 19c04b5ce408..8c5abf2e4794 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -190,18 +190,18 @@ static inline void __user *compat_get_sigframe(struct k_sigaction *ka,
 	return (void __user *) sp;
 }
 
-int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-			  sigset_t *set, struct pt_regs *regs)
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+			  struct pt_regs *regs)
 {
 	unsigned long restorer;
 	struct compat_rt_sigframe __user *frame;
-	int err = 0;
+	int err = 0, sig = ksig->sig;
 	int usig;
 
-	frame = compat_get_sigframe(ka, regs, sizeof(*frame));
+	frame = compat_get_sigframe(&ksig->ka, regs, sizeof(*frame));
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-		goto give_sigsegv;
+		goto err;
 
 	usig = current_thread_info()->exec_domain
 		&& current_thread_info()->exec_domain->signal_invmap
@@ -210,12 +210,12 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		: sig;
 
 	/* Always write at least the signal number for the stack backtracer. */
-	if (ka->sa.sa_flags & SA_SIGINFO) {
+	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
 		/* At sigreturn time, restore the callee-save registers too. */
-		err |= copy_siginfo_to_user32(&frame->info, info);
+		err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
 		regs->flags |= PT_FLAGS_RESTORE_REGS;
 	} else {
-		err |= __put_user(info->si_signo, &frame->info.si_signo);
+		err |= __put_user(ksig->info.si_signo, &frame->info.si_signo);
 	}
 
 	/* Create the ucontext. */
@@ -226,11 +226,11 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 	if (err)
-		goto give_sigsegv;
+		goto err;
 
 	restorer = VDSO_SYM(&__vdso_rt_sigreturn);
-	if (ka->sa.sa_flags & SA_RESTORER)
-		restorer = ptr_to_compat_reg(ka->sa.sa_restorer);
+	if (ksig->ka.sa.sa_flags & SA_RESTORER)
+		restorer = ptr_to_compat_reg(ksig->ka.sa.sa_restorer);
 
 	/*
 	 * Set up registers for signal handler.
@@ -239,7 +239,7 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
 	 * since some things rely on this (e.g. glibc's debug/segfault.c).
 	 */
-	regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
+	regs->pc = ptr_to_compat_reg(ksig->ka.sa.sa_handler);
 	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
 	regs->sp = ptr_to_compat_reg(frame);
 	regs->lr = restorer;
@@ -249,7 +249,8 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->flags |= PT_FLAGS_CALLER_SAVES;
 	return 0;
 
-give_sigsegv:
-	signal_fault("bad setup frame", regs, frame, sig);
+err:
+	trace_unhandled_signal("bad sigreturn frame", regs,
+			       (unsigned long)frame, SIGSEGV);
 	return -EFAULT;
 }
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 531f4c365351..aca6000bca75 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -947,15 +947,15 @@ static void hardwall_remove_proc(struct hardwall_info *info)
 	remove_proc_entry(buf, info->type->proc_dir);
 }
 
-int proc_pid_hardwall(struct task_struct *task, char *buffer)
+int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns,
+		      struct pid *pid, struct task_struct *task)
 {
 	int i;
 	int n = 0;
 	for (i = 0; i < HARDWALL_TYPES; ++i) {
 		struct hardwall_info *info = task->thread.hardwall[i].info;
 		if (info)
-			n += sprintf(&buffer[n], "%s: %d\n",
-				     info->type->name, info->id);
+			seq_printf(m, "%s: %d\n", info->type->name, info->id);
 	}
 	return n;
 }
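The proc_pid_hardwall() change above is part of the kernel-wide conversion of one-shot /proc files from sprintf() into a caller-supplied buffer to the seq_file API, which does its own buffering and overflow handling. As a hedged sketch of the general pattern (names here are hypothetical; the real per-pid hook-up goes through the ONE() table in fs/proc/base.c):

	#include <linux/seq_file.h>

	struct example_res {		/* hypothetical resource */
		const char *name;
		int id;
	};

	/* Emit one "name: id" line; seq_printf() grows the seq_file buffer
	 * as needed, so no manual length bookkeeping is required. */
	static int example_show(struct seq_file *m, void *v)
	{
		struct example_res *res = m->private;

		seq_printf(m, "%s: %d\n", res->name, res->id);
		return 0;
	}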
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 637f2ffaa5f5..ba85765e1436 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(int, irq_depth);
  */
 void tile_dev_intr(struct pt_regs *regs, int intnum)
 {
-	int depth = __get_cpu_var(irq_depth)++;
+	int depth = __this_cpu_inc_return(irq_depth);
 	unsigned long original_irqs;
 	unsigned long remaining_irqs;
 	struct pt_regs *old_regs;
@@ -120,7 +120,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 
 		/* Count device irqs; Linux IPIs are counted elsewhere. */
 		if (irq != IRQ_RESCHEDULE)
-			__get_cpu_var(irq_stat).irq_dev_intr_count++;
+			__this_cpu_inc(irq_stat.irq_dev_intr_count);
 
 		generic_handle_irq(irq);
 	}
@@ -130,10 +130,10 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 	 * including any that were reenabled during interrupt
 	 * handling.
 	 */
-	if (depth == 0)
-		unmask_irqs(~__get_cpu_var(irq_disable_mask));
+	if (depth == 1)
+		unmask_irqs(~__this_cpu_read(irq_disable_mask));
 
-	__get_cpu_var(irq_depth)--;
+	__this_cpu_dec(irq_depth);
 
 	/*
 	 * Track time spent against the current process again and
@@ -151,7 +151,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 static void tile_irq_chip_enable(struct irq_data *d)
 {
 	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
-	if (__get_cpu_var(irq_depth) == 0)
+	if (__this_cpu_read(irq_depth) == 0)
 		unmask_irqs(1UL << d->irq);
 	put_cpu_var(irq_disable_mask);
 }
@@ -197,7 +197,7 @@ static void tile_irq_chip_ack(struct irq_data *d)
  */
 static void tile_irq_chip_eoi(struct irq_data *d)
 {
-	if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
+	if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
 		unmask_irqs(1UL << d->irq);
 }
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S
index 70d7bb0c4d8f..3c2b8d5e1d1a 100644
--- a/arch/tile/kernel/mcount_64.S
+++ b/arch/tile/kernel/mcount_64.S
@@ -77,15 +77,6 @@ STD_ENDPROC(__mcount)
 
 	.align	64
 STD_ENTRY(ftrace_caller)
-	moveli	r11, hw2_last(function_trace_stop)
-	{ shl16insli r11, r11, hw1(function_trace_stop); move r12, lr }
-	{ shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 }
-	ld	r11, r11
-	beqz	r11, 1f
-	jrp	r12
-
-1:
-	{ move r10, lr; move lr, r12 }
 	MCOUNT_SAVE_REGS
 
 	/* arg1: self return address */
@@ -119,15 +110,6 @@ STD_ENDPROC(ftrace_caller)
 
 	.align	64
 STD_ENTRY(__mcount)
-	moveli	r11, hw2_last(function_trace_stop)
-	{ shl16insli r11, r11, hw1(function_trace_stop); move r12, lr }
-	{ shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 }
-	ld	r11, r11
-	beqz	r11, 1f
-	jrp	r12
-
-1:
-	{ move r10, lr; move lr, r12 }
 	{
 	 moveli	r11, hw2_last(ftrace_trace_function)
 	 moveli	r13, hw2_last(ftrace_stub)
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 7867266f9716..ac950be1318e 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(HV_MsgState, msg_state);
 void init_messaging(void)
 {
 	/* Allocate storage for messages in kernel space */
-	HV_MsgState *state = &__get_cpu_var(msg_state);
+	HV_MsgState *state = this_cpu_ptr(&msg_state);
 	int rc = hv_register_message_state(state);
 	if (rc != HV_OK)
 		panic("hv_register_message_state: error %d", rc);
@@ -96,7 +96,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
 			struct hv_driver_cb *cb =
 				(struct hv_driver_cb *)him->intarg;
 			cb->callback(cb, him->intdata);
-			__get_cpu_var(irq_stat).irq_hv_msg_count++;
+			__this_cpu_inc(irq_stat.irq_hv_msg_count);
 		}
 	}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 4918d91bc3a6..d19b13e3a59f 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size)
 	area->nr_pages = npages;
 	area->pages = pages;
 
-	if (map_vm_area(area, prot_rwx, &pages)) {
+	if (map_vm_area(area, prot_rwx, pages)) {
 		vunmap(area->addr);
 		goto error;
 	}
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 2bf6c9c135c1..bb509cee3b59 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -590,7 +590,7 @@ static int tile_event_set_period(struct perf_event *event)
  */
 static void tile_pmu_stop(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
@@ -616,7 +616,7 @@ static void tile_pmu_stop(struct perf_event *event, int flags)
  */
 static void tile_pmu_start(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int idx = event->hw.idx;
 
 	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -650,7 +650,7 @@ static void tile_pmu_start(struct perf_event *event, int flags)
  */
 static int tile_pmu_add(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc;
 	unsigned long mask;
 	int b, max_cnt;
@@ -706,7 +706,7 @@ static int tile_pmu_add(struct perf_event *event, int flags)
  */
 static void tile_pmu_del(struct perf_event *event, int flags)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int i;
 
 	/*
@@ -880,14 +880,14 @@ static struct pmu tilera_pmu = {
 int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
 {
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
 	u64 val;
 	unsigned long status;
 	int bit;
 
-	__get_cpu_var(perf_irqs)++;
+	__this_cpu_inc(perf_irqs);
 
 	if (!atomic_read(&tile_active_events))
 		return 0;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 16ed58948757..0050cbc1d9de 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -64,7 +64,7 @@ early_param("idle", idle_setup);
 
 void arch_cpu_idle(void)
 {
-	__get_cpu_var(irq_stat).idle_timestamp = jiffies;
+	__this_cpu_write(irq_stat.idle_timestamp, jiffies);
 	_cpu_idle();
 }
 
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 112ababa9e55..b9736ded06f2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1218,7 +1218,8 @@ static void __init validate_hv(void)
 	 * various asid variables to their appropriate initial states.
 	 */
 	asid_range = hv_inquire_asid(0);
-	__get_cpu_var(current_asid) = min_asid = asid_range.start;
+	min_asid = asid_range.start;
+	__this_cpu_write(current_asid, min_asid);
 	max_asid = asid_range.start + asid_range.size - 1;
 
 	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
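Most of the arch/tile/kernel changes in this series are mechanical conversions from the deprecated __get_cpu_var() macro to the this_cpu_*() accessor family. A hedged cheat-sheet of the idioms used in the hunks above (the per-cpu variables here are hypothetical):

	#include <linux/percpu.h>

	struct example_stat { int count; };	/* hypothetical */

	static DEFINE_PER_CPU(int, example_depth);
	static DEFINE_PER_CPU(struct example_stat, example_stats);

	static void example_conversions(void)
	{
		struct example_stat *p;
		int d;

		d = __this_cpu_read(example_depth);	/* was: __get_cpu_var(example_depth) */
		__this_cpu_write(example_depth, d);	/* was: __get_cpu_var(example_depth) = d */
		__this_cpu_inc(example_stats.count);	/* was: __get_cpu_var(example_stats).count++ */
		p = this_cpu_ptr(&example_stats);	/* was: &__get_cpu_var(example_stats) */
		(void)p;
	}

One non-mechanical detail worth noting: in tile_dev_intr() the post-increment "__get_cpu_var(irq_depth)++" (which yielded the old value, 0 on first entry) became __this_cpu_inc_return() (which yields the new value), so the matching depth test changed from "depth == 0" to "depth == 1".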
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index d1d026f01267..7c2fecc52177 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -153,18 +153,18 @@ static inline void __user *get_sigframe(struct k_sigaction *ka,
 	return (void __user *) sp;
 }
 
-static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-			  sigset_t *set, struct pt_regs *regs)
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+			  struct pt_regs *regs)
 {
 	unsigned long restorer;
 	struct rt_sigframe __user *frame;
-	int err = 0;
+	int err = 0, sig = ksig->sig;
 	int usig;
 
-	frame = get_sigframe(ka, regs, sizeof(*frame));
+	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame));
 
 	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-		goto give_sigsegv;
+		goto err;
 
 	usig = current_thread_info()->exec_domain
 		&& current_thread_info()->exec_domain->signal_invmap
@@ -173,12 +173,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 		: sig;
 
 	/* Always write at least the signal number for the stack backtracer. */
-	if (ka->sa.sa_flags & SA_SIGINFO) {
+	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
 		/* At sigreturn time, restore the callee-save registers too. */
-		err |= copy_siginfo_to_user(&frame->info, info);
+		err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 		regs->flags |= PT_FLAGS_RESTORE_REGS;
 	} else {
-		err |= __put_user(info->si_signo, &frame->info.si_signo);
+		err |= __put_user(ksig->info.si_signo, &frame->info.si_signo);
 	}
 
 	/* Create the ucontext. */
@@ -189,11 +189,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
 	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 	if (err)
-		goto give_sigsegv;
+		goto err;
 
 	restorer = VDSO_SYM(&__vdso_rt_sigreturn);
-	if (ka->sa.sa_flags & SA_RESTORER)
-		restorer = (unsigned long) ka->sa.sa_restorer;
+	if (ksig->ka.sa.sa_flags & SA_RESTORER)
+		restorer = (unsigned long) ksig->ka.sa.sa_restorer;
 
 	/*
 	 * Set up registers for signal handler.
@@ -202,7 +202,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	 * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
 	 * since some things rely on this (e.g. glibc's debug/segfault.c).
 	 */
-	regs->pc = (unsigned long) ka->sa.sa_handler;
+	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
 	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
 	regs->sp = (unsigned long) frame;
 	regs->lr = restorer;
@@ -212,8 +212,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	regs->flags |= PT_FLAGS_CALLER_SAVES;
 	return 0;
 
-give_sigsegv:
-	signal_fault("bad setup frame", regs, frame, sig);
+err:
+	trace_unhandled_signal("bad sigreturn frame", regs,
+			       (unsigned long)frame, SIGSEGV);
 	return -EFAULT;
 }
 
@@ -221,9 +222,7 @@ give_sigsegv:
  * OK, we're invoking a handler
  */
 
-static void handle_signal(unsigned long sig, siginfo_t *info,
-			  struct k_sigaction *ka,
-			  struct pt_regs *regs)
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
 	int ret;
@@ -238,7 +237,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
 			break;
 
 		case -ERESTARTSYS:
-			if (!(ka->sa.sa_flags & SA_RESTART)) {
+			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
 				regs->regs[0] = -EINTR;
 				break;
 			}
@@ -254,14 +253,12 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
 	/* Set up the stack frame */
 #ifdef CONFIG_COMPAT
 	if (is_compat_task())
-		ret = compat_setup_rt_frame(sig, ka, info, oldset, regs);
+		ret = compat_setup_rt_frame(ksig, oldset, regs);
 	else
 #endif
-		ret = setup_rt_frame(sig, ka, info, oldset, regs);
-	if (ret)
-		return;
-	signal_delivered(sig, info, ka, regs,
-			 test_thread_flag(TIF_SINGLESTEP));
+		ret = setup_rt_frame(ksig, oldset, regs);
+
+	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -271,9 +268,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
  */
 void do_signal(struct pt_regs *regs)
 {
-	siginfo_t info;
-	int signr;
-	struct k_sigaction ka;
+	struct ksignal ksig;
 
 	/*
 	 * i386 will check if we're coming from kernel mode and bail out
@@ -282,10 +277,9 @@ void do_signal(struct pt_regs *regs)
 	 * helpful, we can reinstate the check on "!user_mode(regs)".
 	 */
 
-	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-	if (signr > 0) {
+	if (get_signal(&ksig)) {
 		/* Whee! Actually deliver the signal. */
-		handle_signal(signr, &info, &ka, regs);
+		handle_signal(&ksig, regs);
 		goto done;
 	}
 
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de07fa7d1315..6cb2ce31b5a2 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -740,7 +740,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
 
 void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
 {
-	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
 	struct thread_info *info = (void *)current_thread_info();
 	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
 	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
@@ -766,7 +766,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
 
 void single_step_once(struct pt_regs *regs)
 {
-	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
 	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
 
 	*ss_pc = regs->pc;
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 01e8ab29f43a..d3c4ed780ce2 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -183,12 +183,13 @@ void flush_icache_range(unsigned long start, unsigned long end)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL(flush_icache_range);
 
 /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
 static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
-	__get_cpu_var(irq_stat).irq_resched_count++;
+	__this_cpu_inc(irq_stat.irq_resched_count);
 	scheduler_ipi();
 
 	return IRQ_HANDLED;
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 732e9d138661..0d59a1b60c74 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -41,7 +41,7 @@ void __init smp_prepare_boot_cpu(void)
 	int cpu = smp_processor_id();
 	set_cpu_online(cpu, 1);
 	set_cpu_present(cpu, 1);
-	__get_cpu_var(cpu_state) = CPU_ONLINE;
+	__this_cpu_write(cpu_state, CPU_ONLINE);
 
 	init_messaging();
 }
@@ -158,7 +158,7 @@ static void start_secondary(void)
 	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
 
 	/* Initialize the current asid for our first page table. */
-	__get_cpu_var(current_asid) = min_asid;
+	__this_cpu_write(current_asid, min_asid);
 
 	/* Set up this thread as another owner of the init_mm */
 	atomic_inc(&init_mm.mm_count);
@@ -201,7 +201,7 @@ void online_secondary(void)
 	notify_cpu_starting(smp_processor_id());
 	set_cpu_online(smp_processor_id(), 1);
-	__get_cpu_var(cpu_state) = CPU_ONLINE;
+	__this_cpu_write(cpu_state, CPU_ONLINE);
 
 	/* Set up tile-specific state for this cpu. */
 	setup_cpu(0);
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 462dcd0c1700..b854a1cd0079 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -162,7 +162,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
 
 void setup_tile_timer(void)
 {
-	struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+	struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
 
 	/* Fill in fields that are speed-specific. */
 	clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
@@ -182,7 +182,7 @@ void setup_tile_timer(void)
 void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+	struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
 
 	/*
 	 * Mask the timer interrupt here, since we are a oneshot timer
@@ -194,7 +194,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 	irq_enter();
 
 	/* Track interrupt count. */
-	__get_cpu_var(irq_stat).irq_timer_count++;
+	__this_cpu_inc(irq_stat.irq_timer_count);
 
 	/* Call the generic timer handler */
 	evt->event_handler(evt);
@@ -235,7 +235,7 @@ cycles_t ns2cycles(unsigned long nsecs)
 	 * We do not have to disable preemption here as each core has the same
 	 * clock frequency.
 	 */
-	struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
+	struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
 
 	/*
 	 * as in clocksource.h and x86's timer.h, we split the calculation
@@ -249,34 +249,52 @@ cycles_t ns2cycles(unsigned long nsecs)
 
 void update_vsyscall_tz(void)
 {
-	/* Userspace gettimeofday will spin while this value is odd. */
-	++vdso_data->tz_update_count;
-	smp_wmb();
+	write_seqcount_begin(&vdso_data->tz_seq);
 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
 	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
-	smp_wmb();
-	++vdso_data->tz_update_count;
+	write_seqcount_end(&vdso_data->tz_seq);
 }
 
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec wall_time = tk_xtime(tk);
-	struct timespec *wtm = &tk->wall_to_monotonic;
-	struct clocksource *clock = tk->clock;
-
-	if (clock != &cycle_counter_cs)
+	if (tk->tkr.clock != &cycle_counter_cs)
 		return;
 
-	/* Userspace gettimeofday will spin while this value is odd. */
-	++vdso_data->tb_update_count;
-	smp_wmb();
-	vdso_data->xtime_tod_stamp = clock->cycle_last;
-	vdso_data->xtime_clock_sec = wall_time.tv_sec;
-	vdso_data->xtime_clock_nsec = wall_time.tv_nsec;
-	vdso_data->wtom_clock_sec = wtm->tv_sec;
-	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-	vdso_data->mult = clock->mult;
-	vdso_data->shift = clock->shift;
-	smp_wmb();
-	++vdso_data->tb_update_count;
+	write_seqcount_begin(&vdso_data->tb_seq);
+
+	vdso_data->cycle_last = tk->tkr.cycle_last;
+	vdso_data->mask = tk->tkr.mask;
+	vdso_data->mult = tk->tkr.mult;
+	vdso_data->shift = tk->tkr.shift;
+
+	vdso_data->wall_time_sec = tk->xtime_sec;
+	vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+
+	vdso_data->monotonic_time_sec = tk->xtime_sec
+		+ tk->wall_to_monotonic.tv_sec;
+	vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+		+ ((u64)tk->wall_to_monotonic.tv_nsec
+		   << tk->tkr.shift);
+	while (vdso_data->monotonic_time_snsec >=
+	       (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+		vdso_data->monotonic_time_snsec -=
+			((u64)NSEC_PER_SEC) << tk->tkr.shift;
+		vdso_data->monotonic_time_sec++;
+	}
+
+	vdso_data->wall_time_coarse_sec = tk->xtime_sec;
+	vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
+						  tk->tkr.shift);
+
+	vdso_data->monotonic_time_coarse_sec =
+		vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+	vdso_data->monotonic_time_coarse_nsec =
+		vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+
+	while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+		vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+		vdso_data->monotonic_time_coarse_sec++;
+	}
+
+	write_seqcount_end(&vdso_data->tb_seq);
 }
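The update_vsyscall()/update_vsyscall_tz() rewrite above replaces the hand-rolled odd/even update counters and smp_wmb() pairs with the generic seqcount API: the writer brackets its updates, and readers retry if they observe a write in progress instead of spinning on an odd count. A minimal sketch of that pairing (hypothetical fields; in this series the reader side lives in the vDSO):

	#include <linux/seqlock.h>
	#include <linux/types.h>

	static seqcount_t example_seq = SEQCNT_ZERO(example_seq);
	static u64 example_sec, example_nsec;

	static void example_writer(u64 sec, u64 nsec)
	{
		write_seqcount_begin(&example_seq);	/* count becomes odd */
		example_sec = sec;
		example_nsec = nsec;
		write_seqcount_end(&example_seq);	/* count becomes even */
	}

	static void example_reader(u64 *sec, u64 *nsec)
	{
		unsigned count;

		do {
			count = read_seqcount_begin(&example_seq);
			*sec = example_sec;
			*nsec = example_nsec;
		} while (read_seqcount_retry(&example_seq, count));
	}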
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index f3ceb6308e42..86900ccd4977 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -277,7 +277,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 		if (fixup_exception(regs))  /* ILL_TRANS or UNALIGN_DATA */
 			return;
 		if (fault_num >= 0 &&
-		    fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
+		    fault_num < ARRAY_SIZE(int_name) &&
 		    int_name[fault_num] != NULL)
 			name = int_name[fault_num];
 		else
diff --git a/arch/tile/kernel/vdso.c b/arch/tile/kernel/vdso.c
index 1533af24106e..5bc51d7dfdcb 100644
--- a/arch/tile/kernel/vdso.c
+++ b/arch/tile/kernel/vdso.c
@@ -121,21 +121,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 	return NULL;
 }
 
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-	return NULL;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long address)
-{
-	return 0;
-}
-
-int in_gate_area_no_mm(unsigned long address)
-{
-	return 0;
-}
-
 int setup_vdso_pages(void)
 {
 	struct page **pagelist;
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
index 041cd6c39c83..731529f3f06f 100644
--- a/arch/tile/kernel/vdso/vdso.lds.S
+++ b/arch/tile/kernel/vdso/vdso.lds.S
@@ -82,6 +82,8 @@ VERSION
 		__vdso_rt_sigreturn;
 		__vdso_gettimeofday;
 		gettimeofday;
+		__vdso_clock_gettime;
+		clock_gettime;
 	local:*;
 	};
 }
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index 51ec8e46f5f9..8bb21eda07d8 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -15,6 +15,7 @@
 #define VDSO_BUILD  /* avoid some shift warnings for -m32 in <asm/page.h> */
 #include <linux/time.h>
 #include <asm/timex.h>
+#include <asm/unistd.h>
 #include <asm/vdso.h>
 
 #if CHIP_HAS_SPLIT_CYCLE()
@@ -35,6 +36,11 @@ static inline cycles_t get_cycles_inline(void)
 #define get_cycles get_cycles_inline
 #endif
 
+struct syscall_return_value {
+	long value;
+	long error;
+};
+
 /*
  * Find out the vDSO data page address in the process address space.
  */
@@ -50,58 +56,143 @@ inline unsigned long get_datapage(void)
 	return ret;
 }
 
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+static inline u64 vgetsns(struct vdso_data *vdso)
+{
+	return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
+}
+
+static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
+{
+	unsigned count;
+	u64 ns;
+
+	do {
+		count = read_seqcount_begin(&vdso->tb_seq);
+		ts->tv_sec = vdso->wall_time_sec;
+		ns = vdso->wall_time_snsec;
+		ns += vgetsns(vdso);
+		ns >>= vdso->shift;
+	} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
+
+	return 0;
+}
+
+static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
+{
+	unsigned count;
+	u64 ns;
+
+	do {
+		count = read_seqcount_begin(&vdso->tb_seq);
+		ts->tv_sec = vdso->monotonic_time_sec;
+		ns = vdso->monotonic_time_snsec;
+		ns += vgetsns(vdso);
+		ns >>= vdso->shift;
+	} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
+
+	return 0;
+}
+
+static inline int do_realtime_coarse(struct vdso_data *vdso,
+				     struct timespec *ts)
+{
+	unsigned count;
+
+	do {
+		count = read_seqcount_begin(&vdso->tb_seq);
+		ts->tv_sec = vdso->wall_time_coarse_sec;
+		ts->tv_nsec = vdso->wall_time_coarse_nsec;
+	} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+	return 0;
+}
+
+static inline int do_monotonic_coarse(struct vdso_data *vdso,
+				      struct timespec *ts)
 {
-	cycles_t cycles;
-	unsigned long count, sec, ns;
-	volatile struct vdso_data *vdso_data;
+	unsigned count;
+
+	do {
+		count = read_seqcount_begin(&vdso->tb_seq);
+		ts->tv_sec = vdso->monotonic_time_coarse_sec;
+		ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
+	} while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+	return 0;
+}
+
+struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
+						struct timezone *tz)
+{
+	struct syscall_return_value ret = { 0, 0 };
+	unsigned count;
+	struct vdso_data *vdso = (struct vdso_data *)get_datapage();
 
-	vdso_data = (struct vdso_data *)get_datapage();
 	/* The use of the timezone is obsolete, normally tz is NULL. */
 	if (unlikely(tz != NULL)) {
-		while (1) {
-			/* Spin until the update finish. */
-			count = vdso_data->tz_update_count;
-			if (count & 1)
-				continue;
-
-			tz->tz_minuteswest = vdso_data->tz_minuteswest;
-			tz->tz_dsttime = vdso_data->tz_dsttime;
-
-			/* Check whether updated, read again if so. */
-			if (count == vdso_data->tz_update_count)
-				break;
-		}
+		do {
+			count = read_seqcount_begin(&vdso->tz_seq);
+			tz->tz_minuteswest = vdso->tz_minuteswest;
+			tz->tz_dsttime = vdso->tz_dsttime;
+		} while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
 	}
 
 	if (unlikely(tv == NULL))
-		return 0;
-
-	while (1) {
-		/* Spin until the update finish. */
-		count = vdso_data->tb_update_count;
-		if (count & 1)
-			continue;
-
-		cycles = (get_cycles() - vdso_data->xtime_tod_stamp);
-		ns = (cycles * vdso_data->mult) >> vdso_data->shift;
-		sec = vdso_data->xtime_clock_sec;
-		ns += vdso_data->xtime_clock_nsec;
-		if (ns >= NSEC_PER_SEC) {
-			ns -= NSEC_PER_SEC;
-			sec += 1;
-		}
-
-		/* Check whether updated, read again if so. */
-		if (count == vdso_data->tb_update_count)
-			break;
-	}
+		return ret;
 
-	tv->tv_sec = sec;
-	tv->tv_usec = ns / 1000;
+	do_realtime(vdso, (struct timespec *)tv);
+	tv->tv_usec /= 1000;
 
-	return 0;
+	return ret;
 }
 
 int gettimeofday(struct timeval *tv, struct timezone *tz)
 	__attribute__((weak, alias("__vdso_gettimeofday")));
+
+static struct syscall_return_value vdso_fallback_gettime(long clock,
+							 struct timespec *ts)
+{
+	struct syscall_return_value ret;
+	__asm__ __volatile__ (
+		"swint1"
+		: "=R00" (ret.value), "=R01" (ret.error)
+		: "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
+		: "r2", "r3", "r4", "r5", "r6", "r7",
+		  "r8", "r9", "r11", "r12", "r13", "r14", "r15",
+		  "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+		  "r24", "r25", "r26", "r27", "r28", "r29", "memory");
+	return ret;
+}
+
+struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
+						 struct timespec *ts)
+{
+	struct vdso_data *vdso = (struct vdso_data *)get_datapage();
+	struct syscall_return_value ret = { 0, 0 };
+
+	switch (clock) {
+	case CLOCK_REALTIME:
+		do_realtime(vdso, ts);
+		return ret;
+	case CLOCK_MONOTONIC:
+		do_monotonic(vdso, ts);
+		return ret;
+	case CLOCK_REALTIME_COARSE:
+		do_realtime_coarse(vdso, ts);
+		return ret;
+	case CLOCK_MONOTONIC_COARSE:
+		do_monotonic_coarse(vdso, ts);
+		return ret;
+	default:
+		return vdso_fallback_gettime(clock, ts);
+	}
+}
+
+int clock_gettime(clockid_t clock, struct timespec *ts)
+	__attribute__((weak, alias("__vdso_clock_gettime")));
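With __vdso_clock_gettime added above and exported by the vdso.lds.S hunk, the four fast-path clock IDs can be serviced entirely in userspace; anything else falls back to the swint1 syscall path. A hedged userspace sketch (whether the call actually bypasses the kernel depends on the C library routing clock_gettime() through the vDSO):

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/* CLOCK_REALTIME, CLOCK_MONOTONIC and their _COARSE variants
		 * hit the vDSO fast path; other clock IDs take the syscall. */
		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
			printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}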
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index f1819423ffc9..0e059a0101ea 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -66,11 +66,9 @@ SECTIONS
 
   . = ALIGN(PAGE_SIZE);
   __init_begin = .;
-  VMLINUX_SYMBOL(_sinitdata) = .;
   INIT_DATA_SECTION(16) :data =0
   PERCPU_SECTION(L2_CACHE_BYTES)
   . = ALIGN(PAGE_SIZE);
-  VMLINUX_SYMBOL(_einitdata) = .;
   __init_end = .;
 
   _sdata = .;                   /* Start of data section */
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 0dc218294770..6aa2f2625447 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -103,7 +103,7 @@ static void kmap_atomic_register(struct page *page, int type,
 	spin_lock(&amp_lock);
 
 	/* With interrupts disabled, now fill in the per-cpu info. */
-	amp = &__get_cpu_var(amps).per_type[type];
+	amp = this_cpu_ptr(&amps.per_type[type]);
 	amp->page = page;
 	amp->cpu = smp_processor_id();
 	amp->va = va;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index bfb3127b4df9..caa270165f86 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -254,8 +254,8 @@ static pgprot_t __init init_pgprot(ulong address)
 	 * Everything else that isn't data or bss is heap, so mark it
 	 * with the initial heap home (hash-for-home, or this cpu).  This
 	 * includes any addresses after the loaded image and any address before
-	 * _einitdata, since we already captured the case of text before
-	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
+	 * __init_end, since we already captured the case of text before
+	 * _sinittext, and __pa(einittext) is approximately __pa(__init_begin).
 	 *
 	 * All the LOWMEM pages that we mark this way will get their
 	 * struct page homecache properly marked later, in set_page_homes().
@@ -263,7 +263,7 @@ static pgprot_t __init init_pgprot(ulong address)
 	 * homes, but with a zero free_time we don't have to actually
 	 * do a flush action the first time we use them, either.
 	 */
-	if (address >= (ulong) _end || address < (ulong) _einitdata)
+	if (address >= (ulong) _end || address < (ulong) __init_end)
 		return construct_pgprot(PAGE_KERNEL, initial_heap_home());
 
 	/* Use hash-for-home if requested for data/bss. */
@@ -593,14 +593,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	interrupt_mask_set_mask(-1ULL);
 	rc = flush_and_install_context(__pa(pgtables),
 				       init_pgprot((unsigned long)pgtables),
-				       __get_cpu_var(current_asid),
+				       __this_cpu_read(current_asid),
 				       cpumask_bits(my_cpu_mask));
 	interrupt_mask_restore_mask(irqmask);
 	BUG_ON(rc != 0);
 
 	/* Copy the page table back to the normal swapper_pg_dir. */
 	memcpy(pgd_base, pgtables, sizeof(pgtables));
-	__install_page_table(pgd_base, __get_cpu_var(current_asid),
+	__install_page_table(pgd_base, __this_cpu_read(current_asid),
 			     swapper_pgprot);
 
 	/*
@@ -632,7 +632,7 @@ int devmem_is_allowed(unsigned long pagenr)
 {
 	return pagenr < kaddr_to_pfn(_end) &&
 		!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
-		  pagenr < kaddr_to_pfn(_einitdata)) &&
+		  pagenr < kaddr_to_pfn(__init_end)) &&
 		!(pagenr >= kaddr_to_pfn(_sinittext) ||
 		  pagenr <= kaddr_to_pfn(_einittext-1));
 }
@@ -975,8 +975,8 @@ void free_initmem(void)
 
 	/* Free the data pages that we won't use again after init. */
 	free_init_pages("unused kernel data",
-			(unsigned long)_sinitdata,
-			(unsigned long)_einitdata);
+			(unsigned long)__init_begin,
+			(unsigned long)__init_end);
 
 	/*
 	 * Free the pages mapped from 0xc0000000 that correspond to code