path: root/arch/ia64/kernel
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile  |   1
-rw-r--r--  arch/ia64/kernel/domain.c  |  78
-rw-r--r--  arch/ia64/kernel/jprobes.S |  61
-rw-r--r--  arch/ia64/kernel/kprobes.c | 601
-rw-r--r--  arch/ia64/kernel/smpboot.c |   4
-rw-r--r--  arch/ia64/kernel/traps.c   |  33
6 files changed, 745 insertions, 33 deletions
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 4c73d8ba2e3d..b2e2f6509eb0 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
+obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
index fe532c970438..d65e87b6394f 100644
--- a/arch/ia64/kernel/domain.c
+++ b/arch/ia64/kernel/domain.c
@@ -14,7 +14,7 @@
#include <linux/topology.h>
#include <linux/nodemask.h>
-#define SD_NODES_PER_DOMAIN 6
+#define SD_NODES_PER_DOMAIN 16
#ifdef CONFIG_NUMA
/**
@@ -27,7 +27,7 @@
*
* Should use nodemask_t.
*/
-static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, unsigned long *used_nodes)
{
int i, n, val, min_val, best_node = 0;
@@ -66,7 +66,7 @@ static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
-static cpumask_t __devinit sched_domain_node_span(int node)
+static cpumask_t sched_domain_node_span(int node)
{
int i;
cpumask_t span, nodemask;
@@ -96,7 +96,7 @@ static cpumask_t __devinit sched_domain_node_span(int node)
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static struct sched_group sched_group_cpus[NR_CPUS];
-static int __devinit cpu_to_cpu_group(int cpu)
+static int cpu_to_cpu_group(int cpu)
{
return cpu;
}
@@ -104,7 +104,7 @@ static int __devinit cpu_to_cpu_group(int cpu)
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static struct sched_group sched_group_phys[NR_CPUS];
-static int __devinit cpu_to_phys_group(int cpu)
+static int cpu_to_phys_group(int cpu)
{
#ifdef CONFIG_SCHED_SMT
return first_cpu(cpu_sibling_map[cpu]);
@@ -125,44 +125,36 @@ static struct sched_group *sched_group_nodes[MAX_NUMNODES];
static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
static struct sched_group sched_group_allnodes[MAX_NUMNODES];
-static int __devinit cpu_to_allnodes_group(int cpu)
+static int cpu_to_allnodes_group(int cpu)
{
return cpu_to_node(cpu);
}
#endif
/*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
*/
-void __devinit arch_init_sched_domains(void)
+void build_sched_domains(const cpumask_t *cpu_map)
{
int i;
- cpumask_t cpu_default_map;
/*
- * Setup mask for cpus without special case scheduling requirements.
- * For now this just excludes isolated cpus, but could be used to
- * exclude other special cases in the future.
+ * Set up domains for cpus specified by the cpu_map.
*/
- cpus_complement(cpu_default_map, cpu_isolated_map);
- cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
-
- /*
- * Set up domains. Isolated domains just stay on the dummy domain.
- */
- for_each_cpu_mask(i, cpu_default_map) {
+ for_each_cpu_mask(i, *cpu_map) {
int group;
struct sched_domain *sd = NULL, *p;
cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
- cpus_and(nodemask, nodemask, cpu_default_map);
+ cpus_and(nodemask, nodemask, *cpu_map);
#ifdef CONFIG_NUMA
if (num_online_cpus()
> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
sd = &per_cpu(allnodes_domains, i);
*sd = SD_ALLNODES_INIT;
- sd->span = cpu_default_map;
+ sd->span = *cpu_map;
group = cpu_to_allnodes_group(i);
sd->groups = &sched_group_allnodes[group];
p = sd;
@@ -173,7 +165,7 @@ void __devinit arch_init_sched_domains(void)
*sd = SD_NODE_INIT;
sd->span = sched_domain_node_span(cpu_to_node(i));
sd->parent = p;
- cpus_and(sd->span, sd->span, cpu_default_map);
+ cpus_and(sd->span, sd->span, *cpu_map);
#endif
p = sd;
@@ -190,7 +182,7 @@ void __devinit arch_init_sched_domains(void)
group = cpu_to_cpu_group(i);
*sd = SD_SIBLING_INIT;
sd->span = cpu_sibling_map[i];
- cpus_and(sd->span, sd->span, cpu_default_map);
+ cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
sd->groups = &sched_group_cpus[group];
#endif
@@ -198,9 +190,9 @@ void __devinit arch_init_sched_domains(void)
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
- for_each_cpu_mask(i, cpu_default_map) {
+ for_each_cpu_mask(i, *cpu_map) {
cpumask_t this_sibling_map = cpu_sibling_map[i];
- cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
+ cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
if (i != first_cpu(this_sibling_map))
continue;
@@ -213,7 +205,7 @@ void __devinit arch_init_sched_domains(void)
for (i = 0; i < MAX_NUMNODES; i++) {
cpumask_t nodemask = node_to_cpumask(i);
- cpus_and(nodemask, nodemask, cpu_default_map);
+ cpus_and(nodemask, nodemask, *cpu_map);
if (cpus_empty(nodemask))
continue;
@@ -222,7 +214,7 @@ void __devinit arch_init_sched_domains(void)
}
#ifdef CONFIG_NUMA
- init_sched_build_groups(sched_group_allnodes, cpu_default_map,
+ init_sched_build_groups(sched_group_allnodes, *cpu_map,
&cpu_to_allnodes_group);
for (i = 0; i < MAX_NUMNODES; i++) {
@@ -233,12 +225,12 @@ void __devinit arch_init_sched_domains(void)
cpumask_t covered = CPU_MASK_NONE;
int j;
- cpus_and(nodemask, nodemask, cpu_default_map);
+ cpus_and(nodemask, nodemask, *cpu_map);
if (cpus_empty(nodemask))
continue;
domainspan = sched_domain_node_span(i);
- cpus_and(domainspan, domainspan, cpu_default_map);
+ cpus_and(domainspan, domainspan, *cpu_map);
sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
sched_group_nodes[i] = sg;
@@ -266,7 +258,7 @@ void __devinit arch_init_sched_domains(void)
int n = (i + j) % MAX_NUMNODES;
cpus_complement(notcovered, covered);
- cpus_and(tmp, notcovered, cpu_default_map);
+ cpus_and(tmp, notcovered, *cpu_map);
cpus_and(tmp, tmp, domainspan);
if (cpus_empty(tmp))
break;
@@ -293,7 +285,7 @@ void __devinit arch_init_sched_domains(void)
#endif
/* Calculate CPU power for physical packages and nodes */
- for_each_cpu_mask(i, cpu_default_map) {
+ for_each_cpu_mask(i, *cpu_map) {
int power;
struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
@@ -359,13 +351,35 @@ next_sg:
cpu_attach_domain(sd, i);
}
}
+/*
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ */
+void arch_init_sched_domains(const cpumask_t *cpu_map)
+{
+ cpumask_t cpu_default_map;
+
+ /*
+ * Setup mask for cpus without special case scheduling requirements.
+ * For now this just excludes isolated cpus, but could be used to
+ * exclude other special cases in the future.
+ */
+ cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+
+ build_sched_domains(&cpu_default_map);
+}
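With this split, build_sched_domains() does the heavy lifting while arch_init_sched_domains() only filters out isolated cpus before delegating. A minimal sketch of how a hotplug-time caller could drive the new pair of entry points; the wrapper function is hypothetical and not part of this diff, while cpumask_t, cpu_online_map and the two arch_* routines are taken as given:

/* Hypothetical caller: tear down and rebuild domains around a hotplug event. */
static void rebuild_sched_domains(const cpumask_t *old_map)
{
	cpumask_t new_map = cpu_online_map;

	arch_destroy_sched_domains(old_map);	/* free the per-node sched_group lists */
	arch_init_sched_domains(&new_map);	/* drop isolated cpus, then build and attach */
}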
-void __devinit arch_destroy_sched_domains(void)
+void arch_destroy_sched_domains(const cpumask_t *cpu_map)
{
#ifdef CONFIG_NUMA
int i;
for (i = 0; i < MAX_NUMNODES; i++) {
+ cpumask_t nodemask = node_to_cpumask(i);
struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+ cpus_and(nodemask, nodemask, *cpu_map);
+ if (cpus_empty(nodemask))
+ continue;
+
if (sg == NULL)
continue;
sg = sg->next;
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
new file mode 100644
index 000000000000..b7fa3ccd2b0f
--- /dev/null
+++ b/arch/ia64/kernel/jprobes.S
@@ -0,0 +1,61 @@
+/*
+ * Jprobe specific operations
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-May Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> initial implementation
+ *
+ * Jprobes (a.k.a. "jump probes", which are built on top of kprobes) allow a
+ * probe to be inserted at the beginning of a function call. The fundamental
+ * difference between a jprobe and a kprobe is that the jprobe handler is
+ * executed in the same context as the target function, while kprobe handlers
+ * are executed in interrupt context.
+ *
+ * For jprobes we initially gain control by placing a break point in the
+ * first instruction of the targeted function. When we catch that specific
+ * break, we:
+ * * set the return address to our jprobe_inst_return() function
+ * * jump to the jprobe handler function
+ *
+ * Since we fixed up the return address, the jprobe handler will return to our
+ * jprobe_inst_return() function, giving us control again. At this point we
+ * are back in the parent's frame marker, so we do yet another call to our
+ * jprobe_break() function to fix up the frame marker as it would normally
+ * exist in the target function.
+ *
+ * Our jprobe_return function then transfers control back to kprobes.c by
+ * executing a break instruction using one of our reserved numbers. When we
+ * catch that break in kprobes.c, we continue like we do for a normal kprobe
+ * by single stepping the emulated instruction, and then returning execution
+ * to the correct location.
+ */
+#include <asm/asmmacro.h>
+
+ /*
+ * void jprobe_break(void)
+ */
+ENTRY(jprobe_break)
+ break.m 0x80300
+END(jprobe_break)
+
+ /*
+ * void jprobe_inst_return(void)
+ */
+GLOBAL_ENTRY(jprobe_inst_return)
+ br.call.sptk.many b0=jprobe_break
+END(jprobe_inst_return)
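For context, a jprobe built on this support is driven entirely through the generic kprobes API. The sketch below is illustrative rather than part of this diff: the module, its target parameter, the handler name, and the do_fork() signature it mirrors are all assumptions, and the probed address is expected to be supplied by the user (for example from System.map).

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Hypothetical handler: must mirror the probed do_fork() signature exactly. */
static long my_do_fork(unsigned long clone_flags, unsigned long stack_start,
		       struct pt_regs *regs, unsigned long stack_size,
		       int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork entered, clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* required by the jprobes API; control comes back through jprobe_inst_return() */
	return 0;		/* never reached */
}

static struct jprobe my_jprobe = {
	/* On ia64 a function pointer is a descriptor; setjmp_pre_handler()
	 * dereferences it to get the entry IP and gp. */
	.entry = (kprobe_opcode_t *)my_do_fork,
};

static unsigned long target;	/* address of do_fork, supplied at insmod time */
module_param(target, ulong, 0);

static int __init jprobe_example_init(void)
{
	my_jprobe.kp.addr = (kprobe_opcode_t *)target;
	return register_jprobe(&my_jprobe);
}

static void __exit jprobe_example_exit(void)
{
	unregister_jprobe(&my_jprobe);
}

module_init(jprobe_example_init);
module_exit(jprobe_example_exit);
MODULE_LICENSE("GPL");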
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
new file mode 100644
index 000000000000..5978823d5c63
--- /dev/null
+++ b/arch/ia64/kernel/kprobes.c
@@ -0,0 +1,601 @@
+/*
+ * Kernel Probes (KProbes)
+ * arch/ia64/kernel/kprobes.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ * <anil.s.keshavamurthy@intel.com> adapted from i386
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/moduleloader.h>
+
+#include <asm/pgtable.h>
+#include <asm/kdebug.h>
+
+extern void jprobe_inst_return(void);
+
+/* kprobe_status settings */
+#define KPROBE_HIT_ACTIVE 0x00000001
+#define KPROBE_HIT_SS 0x00000002
+
+static struct kprobe *current_kprobe, *kprobe_prev;
+static unsigned long kprobe_status, kprobe_status_prev;
+static struct pt_regs jprobe_saved_regs;
+
+enum instruction_type {A, I, M, F, B, L, X, u};
+static enum instruction_type bundle_encoding[32][3] = {
+ { M, I, I }, /* 00 */
+ { M, I, I }, /* 01 */
+ { M, I, I }, /* 02 */
+ { M, I, I }, /* 03 */
+ { M, L, X }, /* 04 */
+ { M, L, X }, /* 05 */
+ { u, u, u }, /* 06 */
+ { u, u, u }, /* 07 */
+ { M, M, I }, /* 08 */
+ { M, M, I }, /* 09 */
+ { M, M, I }, /* 0A */
+ { M, M, I }, /* 0B */
+ { M, F, I }, /* 0C */
+ { M, F, I }, /* 0D */
+ { M, M, F }, /* 0E */
+ { M, M, F }, /* 0F */
+ { M, I, B }, /* 10 */
+ { M, I, B }, /* 11 */
+ { M, B, B }, /* 12 */
+ { M, B, B }, /* 13 */
+ { u, u, u }, /* 14 */
+ { u, u, u }, /* 15 */
+ { B, B, B }, /* 16 */
+ { B, B, B }, /* 17 */
+ { M, M, B }, /* 18 */
+ { M, M, B }, /* 19 */
+ { u, u, u }, /* 1A */
+ { u, u, u }, /* 1B */
+ { M, F, B }, /* 1C */
+ { M, F, B }, /* 1D */
+ { u, u, u }, /* 1E */
+ { u, u, u }, /* 1F */
+};
+
+/*
+ * Check whether the instruction is an IP-relative instruction
+ * and update the kprobe inst flag accordingly.
+ */
+static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ p->ainsn.inst_flag = 0;
+ p->ainsn.target_br_reg = 0;
+
+ if (bundle_encoding[template][slot] == B) {
+ switch (major_opcode) {
+ case INDIRECT_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ case IP_RELATIVE_PREDICT_OPCODE:
+ case IP_RELATIVE_BRANCH_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+ break;
+ case IP_RELATIVE_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ }
+ } else if (bundle_encoding[template][slot] == X) {
+ switch (major_opcode) {
+ case LONG_CALL_OPCODE:
+ p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
+ p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
+ break;
+ }
+ }
+ return;
+}
+
+/*
+ * Check whether the instruction on which we are inserting
+ * the kprobe is supported.
+ * Returns 0 if supported
+ * Returns -EINVAL if unsupported
+ */
+static int unsupported_inst(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+
+ if (bundle_encoding[template][slot] == I) {
+ switch (major_opcode) {
+ case 0x0: //I_UNIT_MISC_OPCODE:
+ /*
+ * Check for Integer speculation instruction
+ * - Bits 33-35 must equal 0x1
+ */
+ if (((kprobe_inst >> 33) & 0x7) == 1) {
+ printk(KERN_WARNING
+ "Kprobes on speculation inst at <0x%lx> not supported\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /*
+ * IP relative mov instruction
+ * - Bits 27-35 must equal 0x30
+ */
+ if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
+ printk(KERN_WARNING
+ "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
+ addr);
+ return -EINVAL;
+
+ }
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * Check whether the instruction on which we are inserting the kprobe,
+ * (qp) cmpx.crel.ctype p1,p2=r2,r3,
+ * is a cmp instruction with ctype equal to unc.
+ */
+static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode,
+unsigned long kprobe_inst)
+{
+ cmp_inst_t cmp_inst;
+ uint ctype_unc = 0;
+
+ if (!((bundle_encoding[template][slot] == I) ||
+ (bundle_encoding[template][slot] == M)))
+ goto out;
+
+ if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
+ (major_opcode == 0xE)))
+ goto out;
+
+ cmp_inst.l = kprobe_inst;
+ if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
+ /* Integer compare - Register Register (A6 type) */
+ if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
+ &&(cmp_inst.f.c == 1))
+ ctype_unc = 1;
+ } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) {
+ /* Integer compare - Immediate Register (A8 type) */
+ if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1))
+ ctype_unc = 1;
+ }
+out:
+ return ctype_unc;
+}
+
+/*
+ * In this function we override the bundle with
+ * the break instruction at the given slot.
+ */
+static void prepare_break_inst(uint template, uint slot, uint major_opcode,
+ unsigned long kprobe_inst, struct kprobe *p)
+{
+ unsigned long break_inst = BREAK_INST;
+ bundle_t *bundle = &p->ainsn.insn.bundle;
+
+ /*
+ * Copy the original kprobe_inst's qualifying predicate (qp)
+ * to the break instruction iff !is_cmp_ctype_unc_inst,
+ * because a cmp instruction with ctype equal to unc is a
+ * special instruction that always needs to be executed
+ * regardless of its qp.
+ */
+ if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst))
+ break_inst |= (0x3f & kprobe_inst);
+
+ switch (slot) {
+ case 0:
+ bundle->quad0.slot0 = break_inst;
+ break;
+ case 1:
+ bundle->quad0.slot1_p0 = break_inst;
+ bundle->quad1.slot1_p1 = break_inst >> (64-46);
+ break;
+ case 2:
+ bundle->quad1.slot2 = break_inst;
+ break;
+ }
+
+ /*
+ * Update the instruction flag so that we can
+ * emulate the instruction properly after we
+ * single-step on the original instruction.
+ */
+ update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
+}
+
+static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
+ unsigned long *kprobe_inst, uint *major_opcode)
+{
+ unsigned long kprobe_inst_p0, kprobe_inst_p1;
+ unsigned int template;
+
+ template = bundle->quad0.template;
+
+ switch (slot) {
+ case 0:
+ *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
+ *kprobe_inst = bundle->quad0.slot0;
+ break;
+ case 1:
+ *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
+ kprobe_inst_p0 = bundle->quad0.slot1_p0;
+ kprobe_inst_p1 = bundle->quad1.slot1_p1;
+ *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
+ break;
+ case 2:
+ *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
+ *kprobe_inst = bundle->quad1.slot2;
+ break;
+ }
+}
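The slot 1 case above follows directly from the IA-64 bundle layout: 5 template bits followed by three 41-bit slots, so slot 1 starts at bundle bit 46, leaving its low 18 bits at the top of quad0 and its remaining 23 bits at the bottom of quad1, which is where the << (64-46) shift comes from. A small sketch of that arithmetic, assuming only the bundle_t fields already used in this file; the major opcode returned alongside it is simply the top four bits (bits 37-40) of the reassembled 41-bit instruction.

/* Sketch: reassemble the 41-bit slot-1 instruction from the two 64-bit quads.
 * Bundle layout: bits 0-4 template, 5-45 slot 0, 46-86 slot 1, 87-127 slot 2.
 */
static unsigned long slot1_instruction(bundle_t *bundle)
{
	unsigned long lo18 = bundle->quad0.slot1_p0;	/* bundle bits 46..63 */
	unsigned long hi23 = bundle->quad1.slot1_p1;	/* bundle bits 64..86 */

	return lo18 | (hi23 << (64 - 46));		/* instruction bits 0..40 */
}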
+
+static int valid_kprobe_addr(int template, int slot, unsigned long addr)
+{
+ if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
+ printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
+ addr);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline void save_previous_kprobe(void)
+{
+ kprobe_prev = current_kprobe;
+ kprobe_status_prev = kprobe_status;
+}
+
+static inline void restore_previous_kprobe(void)
+{
+ current_kprobe = kprobe_prev;
+ kprobe_status = kprobe_status_prev;
+}
+
+static inline void set_current_kprobe(struct kprobe *p)
+{
+ current_kprobe = p;
+}
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long) p->addr;
+ unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
+ unsigned long kprobe_inst=0;
+ unsigned int slot = addr & 0xf, template, major_opcode = 0;
+ bundle_t *bundle = &p->ainsn.insn.bundle;
+
+ memcpy(&p->opcode.bundle, kprobe_addr, sizeof(bundle_t));
+ memcpy(&p->ainsn.insn.bundle, kprobe_addr, sizeof(bundle_t));
+
+ template = bundle->quad0.template;
+
+ if(valid_kprobe_addr(template, slot, addr))
+ return -EINVAL;
+
+ /* Move to slot 2 if the bundle is MLX type and the kprobe slot is 1 */
+ if (slot == 1 && bundle_encoding[template][1] == L)
+ slot++;
+
+ /* Get kprobe_inst and major_opcode from the bundle */
+ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
+
+ if (unsupported_inst(template, slot, major_opcode, kprobe_inst, p))
+ return -EINVAL;
+
+ prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
+
+ return 0;
+}
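arch_prepare_kprobe() is called on behalf of users by the generic register_kprobe() path. A hypothetical module exercising it might look like the sketch below; the handler names and the target parameter are illustrative, and on ia64 the probe address carries the slot number in its low four bits, exactly as the code above assumes.

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p, iip=0x%lx\n", p->addr, regs->cr_iip);
	return 0;	/* 0: go on to single-step the original bundle */
}

static void my_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
	printk(KERN_INFO "single-step for probe at %p completed\n", p->addr);
}

static unsigned long target;	/* bundle address + slot (0-2) of the probed instruction */
module_param(target, ulong, 0);

static struct kprobe my_kprobe;

static int __init kprobe_example_init(void)
{
	my_kprobe.addr = (kprobe_opcode_t *)target;
	my_kprobe.pre_handler = my_pre;
	my_kprobe.post_handler = my_post;
	return register_kprobe(&my_kprobe);
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&my_kprobe);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");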
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+ unsigned long arm_addr = addr & ~0xFULL;
+
+ memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
+ flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ unsigned long addr = (unsigned long)p->addr;
+ unsigned long arm_addr = addr & ~0xFULL;
+
+ /* p->opcode contains the original unaltered bundle */
+ memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
+ flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+/*
+ * We are resuming execution after a single-step fault, so the pt_regs
+ * structure reflects the register state after we executed the instruction
+ * copied into the kprobe (p->opcode.bundle). We still need to adjust
+ * the IP to point back to the original probe address; while doing so,
+ * handle the cases where the relative IP address and/or the branch
+ * register need to be fixed up.
+ */
+static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
+ unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
+ unsigned long template;
+ int slot = ((unsigned long)p->addr & 0xf);
+
+ template = p->opcode.bundle.quad0.template;
+
+ if (slot == 1 && bundle_encoding[template][1] == L)
+ slot = 2;
+
+ if (p->ainsn.inst_flag) {
+
+ if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
+ /* Fix relative IP address */
+ regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr;
+ }
+
+ if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
+ /*
+ * Fix the target branch register; the software convention
+ * is to use either b0, b6 or b7, so we check only those
+ * registers.
+ */
+ switch (p->ainsn.target_br_reg) {
+ case 0:
+ if ((regs->b0 == bundle_addr) ||
+ (regs->b0 == bundle_addr + 0x10)) {
+ regs->b0 = (regs->b0 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ case 6:
+ if ((regs->b6 == bundle_addr) ||
+ (regs->b6 == bundle_addr + 0x10)) {
+ regs->b6 = (regs->b6 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ case 7:
+ if ((regs->b7 == bundle_addr) ||
+ (regs->b7 == bundle_addr + 0x10)) {
+ regs->b7 = (regs->b7 - bundle_addr) +
+ resume_addr;
+ }
+ break;
+ } /* end switch */
+ }
+ goto turn_ss_off;
+ }
+
+ if (slot == 2) {
+ if (regs->cr_iip == bundle_addr + 0x10) {
+ regs->cr_iip = resume_addr + 0x10;
+ }
+ } else {
+ if (regs->cr_iip == bundle_addr) {
+ regs->cr_iip = resume_addr;
+ }
+ }
+
+turn_ss_off:
+ /* Turn off Single Step bit */
+ ia64_psr(regs)->ss = 0;
+}
+
+static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
+ unsigned long slot = (unsigned long)p->addr & 0xf;
+
+ /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
+ regs->cr_iip = bundle_addr & ~0xFULL;
+
+ if (slot > 2)
+ slot = 0;
+
+ ia64_psr(regs)->ri = slot;
+
+ /* turn on single stepping */
+ ia64_psr(regs)->ss = 1;
+}
+
+static int pre_kprobes_handler(struct die_args *args)
+{
+ struct kprobe *p;
+ int ret = 0;
+ struct pt_regs *regs = args->regs;
+ kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
+
+ preempt_disable();
+
+ /* Handle recursion cases */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kprobe_status == KPROBE_HIT_SS) {
+ unlock_kprobes();
+ goto no_kprobe;
+ }
+ /* We have re-entered pre_kprobes_handler(), because
+ * another probe was hit while within the handler.
+ * Here we save the original kprobe variables and
+ * just single-step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe();
+ set_current_kprobe(p);
+ p->nmissed++;
+ prepare_ss(p, regs);
+ kprobe_status = KPROBE_REENTER;
+ return 1;
+ } else if (args->err == __IA64_BREAK_JPROBE) {
+ /*
+ * jprobe instrumented function just completed
+ */
+ p = current_kprobe;
+ if (p->break_handler && p->break_handler(p, regs)) {
+ goto ss_probe;
+ }
+ } else {
+ /* Not our break */
+ goto no_kprobe;
+ }
+ }
+
+ lock_kprobes();
+ p = get_kprobe(addr);
+ if (!p) {
+ unlock_kprobes();
+ goto no_kprobe;
+ }
+
+ kprobe_status = KPROBE_HIT_ACTIVE;
+ set_current_kprobe(p);
+
+ if (p->pre_handler && p->pre_handler(p, regs))
+ /*
+ * Our pre-handler is specifically requesting that we just
+ * do a return. This is handling the case where the
+ * pre-handler is really our special jprobe pre-handler.
+ */
+ return 1;
+
+ss_probe:
+ prepare_ss(p, regs);
+ kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+static int post_kprobes_handler(struct pt_regs *regs)
+{
+ if (!kprobe_running())
+ return 0;
+
+ if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
+ kprobe_status = KPROBE_HIT_SSDONE;
+ current_kprobe->post_handler(current_kprobe, regs, 0);
+ }
+
+ resume_execution(current_kprobe, regs);
+
+ /* Restore the original saved kprobe variables and continue. */
+ if (kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe();
+ goto out;
+ }
+
+ unlock_kprobes();
+
+out:
+ preempt_enable_no_resched();
+ return 1;
+}
+
+static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ if (!kprobe_running())
+ return 0;
+
+ if (current_kprobe->fault_handler &&
+ current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ return 1;
+
+ if (kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(current_kprobe, regs);
+ unlock_kprobes();
+ preempt_enable_no_resched();
+ }
+
+ return 0;
+}
+
+int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct die_args *args = (struct die_args *)data;
+ switch(val) {
+ case DIE_BREAK:
+ if (pre_kprobes_handler(args))
+ return NOTIFY_STOP;
+ break;
+ case DIE_SS:
+ if (post_kprobes_handler(args->regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_PAGE_FAULT:
+ if (kprobes_fault_handler(args->regs, args->trapnr))
+ return NOTIFY_STOP;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
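kprobe_exceptions_notify() only takes effect once it sits on the ia64die_chain that the traps.c hunk below introduces. The registration itself lives in the architecture-independent kprobes code rather than in this diff, but it amounts to roughly the following sketch (the notifier_block name and the init wrapper are illustrative; the callback is assumed visible via a header):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
};

static int __init hook_die_notifier(void)
{
	/* register_die_notifier() is the helper added to traps.c by this patch */
	return register_die_notifier(&kprobe_exceptions_nb);
}
core_initcall(hook_die_notifier);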
+
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
+
+ /* save architectural state */
+ jprobe_saved_regs = *regs;
+
+ /* after rfi, execute the jprobe instrumented function */
+ regs->cr_iip = addr & ~0xFULL;
+ ia64_psr(regs)->ri = addr & 0xf;
+ regs->r1 = ((struct fnptr *)(jp->entry))->gp;
+
+ /*
+ * fix up the return address to point at our jprobe_inst_return()
+ * function in jprobes.S
+ */
+ regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;
+
+ return 1;
+}
+
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ *regs = jprobe_saved_regs;
+ return 1;
+}
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 3865f088ffa2..623b0a546709 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -346,6 +346,7 @@ smp_callin (void)
lock_ipi_calllock();
cpu_set(cpuid, cpu_online_map);
unlock_ipi_calllock();
+ per_cpu(cpu_state, cpuid) = CPU_ONLINE;
smp_setup_percpu_timer();
@@ -611,6 +612,7 @@ void __devinit smp_prepare_boot_cpu(void)
{
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callin_map);
+ per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
/*
@@ -688,6 +690,7 @@ int __cpu_disable(void)
return -EBUSY;
remove_siblinginfo(cpu);
+ cpu_clear(cpu, cpu_online_map);
fixup_irqs();
local_flush_tlb_all();
cpu_clear(cpu, cpu_callin_map);
@@ -774,6 +777,7 @@ __cpu_up (unsigned int cpu)
if (cpu_isset(cpu, cpu_callin_map))
return -EINVAL;
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Processor goes to start_secondary(), sets online flag */
ret = do_boot_cpu(sapicid, cpu);
if (ret < 0)
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 1861173bd4f6..e7e520d90f03 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -21,12 +21,26 @@
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
+#include <asm/kdebug.h>
extern spinlock_t timerlist_lock;
fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);
+struct notifier_block *ia64die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+ int err = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&die_notifier_lock, flags);
+ err = notifier_chain_register(&ia64die_chain, nb);
+ spin_unlock_irqrestore(&die_notifier_lock, flags);
+ return err;
+}
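The notify_die() calls added below are the other half of this interface. The ia64 asm/kdebug.h that provides them is not part of this hunk, but based on how kprobes.c consumes struct die_args above, it behaves roughly like the sketch that follows; treat the exact types and the notifier_call_chain() plumbing as assumptions.

/* Sketch: package the arguments and walk ia64die_chain; a NOTIFY_STOP from
 * any notifier (e.g. kprobes) short-circuits the normal fault handling.
 */
static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
			     long err, int trap, int sig)
{
	struct die_args args = {
		.regs = regs,
		.str = str,
		.err = err,
		.trapnr = trap,
		.signr = sig,
	};

	return notifier_call_chain(&ia64die_chain, val, &args);
}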
+
void __init
trap_init (void)
{
@@ -137,6 +151,10 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
switch (break_num) {
case 0: /* unknown error (used by GCC for __builtin_abort()) */
+ if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
+ == NOTIFY_STOP) {
+ return;
+ }
die_if_kernel("bugcheck!", regs, break_num);
sig = SIGILL; code = ILL_ILLOPC;
break;
@@ -189,6 +207,15 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
sig = SIGILL; code = __ILL_BNDMOD;
break;
+ case 0x80200:
+ case 0x80300:
+ if (notify_die(DIE_BREAK, "kprobe", regs, break_num, TRAP_BRKPT, SIGTRAP)
+ == NOTIFY_STOP) {
+ return;
+ }
+ sig = SIGTRAP; code = TRAP_BRKPT;
+ break;
+
default:
if (break_num < 0x40000 || break_num > 0x100000)
die_if_kernel("Bad break", regs, break_num);
@@ -548,7 +575,11 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
#endif
break;
case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
- case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
+ case 36:
+ if (notify_die(DIE_SS, "ss", &regs, vector,
+ vector, SIGTRAP) == NOTIFY_STOP)
+ return;
+ siginfo.si_code = TRAP_TRACE; ifa = 0; break;
}
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;