author	Martin Baulig <martin@src.gnome.org>	1998-06-14 12:29:52 +0000
committer	Martin Baulig <martin@src.gnome.org>	1998-06-14 12:29:52 +0000
commit	b56c327cccaa7fbb3d5cc83d3a334c95a5de5806 (patch)
tree	d6a936f869e5c75d0c8fdd092232cae1004f3372
parent	8f43551d687d72539802f5dabcf4a366da40b080 (diff)
download	libgtop-b56c327cccaa7fbb3d5cc83d3a334c95a5de5806.tar.gz
This new system call now provides all information required for libgtop. (tag: table_0_02)
-rw-r--r--	kernel/main.c	341
-rw-r--r--	kernel/table.h	98
-rw-r--r--	kernel/test.c	31
3 files changed, 440 insertions(+), 30 deletions(-)
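
As a usage illustration (not part of the commit), here is a minimal user-space sketch of the extended three-argument table() call querying one of the new per-process types. It assumes the _syscall3 stub and __NR_table definition that kernel/test.c relies on, plus the TABLE_PROC_* constants from kernel/table.h below; header paths and error handling are illustrative only.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

#include "table.h"

#include <syscall.h>

static inline _syscall3 (int, table, int, type, union table *, tbl, const void *, param);

int
main (void)
{
	union table tbl;
	pid_t pid = getpid ();

	/* Per-process types read a pid_t through the new third argument. */
	if (table (TABLE_PROC_STATE, &tbl, &pid) == -1) {
		fprintf (stderr, "table(%u): %s\n", TABLE_PROC_STATE, strerror (errno));
		return 1;
	}

	printf ("state = %ld, flags = %lu, comm = %.16s\n",
		tbl.proc_state.state, tbl.proc_state.flags, tbl.proc_state.comm);
	return 0;
}

The system-wide types (TABLE_CPU, TABLE_MEM, ...) keep passing NULL as param, exactly as kernel/test.c does below.
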
diff --git a/kernel/main.c b/kernel/main.c
index 7f6381bb..08a184d6 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -28,11 +28,168 @@
#include "version.h"
+#if defined(__i386__)
+# define KSTK_EIP(tsk) (((unsigned long *)tsk->kernel_stack_page)[1019])
+# define KSTK_ESP(tsk) (((unsigned long *)tsk->kernel_stack_page)[1022])
+#elif defined(__alpha__)
+ /*
+ * See arch/alpha/kernel/ptrace.c for details.
+ */
+# define PT_REG(reg) (PAGE_SIZE - sizeof(struct pt_regs) \
+ + (long)&((struct pt_regs *)0)->reg)
+# define KSTK_EIP(tsk) (*(unsigned long *)(tsk->kernel_stack_page + PT_REG(pc)))
+# define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->tss.usp)
+#elif defined(__sparc__)
+# define PT_REG(reg) (PAGE_SIZE - sizeof(struct pt_regs) \
+ + (long)&((struct pt_regs *)0)->reg)
+# define KSTK_EIP(tsk) (*(unsigned long *)(tsk->kernel_stack_page + PT_REG(pc)))
+# define KSTK_ESP(tsk) (*(unsigned long *)(tsk->kernel_stack_page + PT_REG(u_regs[UREG_FP])))
+#endif
+
+static struct task_struct *
+get_task (pid_t pid)
+{
+ struct task_struct ** p;
+
+ p = task;
+ while (++p < task+NR_TASKS) {
+ if (*p && (*p)->pid == pid)
+ return *p;
+ }
+
+ return NULL;
+}
+
+static inline void statm_pte_range(pmd_t * pmd, unsigned long address, unsigned long size,
+ int * pages, int * shared, int * dirty, int * total)
+{
+ pte_t * pte;
+ unsigned long end;
+
+ if (pmd_none(*pmd))
+ return;
+ if (pmd_bad(*pmd)) {
+ printk("statm_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
+ pmd_clear(pmd);
+ return;
+ }
+ pte = pte_offset(pmd, address);
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t page = *pte;
+
+ address += PAGE_SIZE;
+ pte++;
+ if (pte_none(page))
+ continue;
+ ++*total;
+ if (!pte_present(page))
+ continue;
+ ++*pages;
+ if (pte_dirty(page))
+ ++*dirty;
+ if (pte_page(page) >= high_memory)
+ continue;
+ if (mem_map[MAP_NR(pte_page(page))].count > 1)
+ ++*shared;
+ } while (address < end);
+}
+
+static inline void statm_pmd_range(pgd_t * pgd, unsigned long address, unsigned long size,
+ int * pages, int * shared, int * dirty, int * total)
+{
+ pmd_t * pmd;
+ unsigned long end;
+
+ if (pgd_none(*pgd))
+ return;
+ if (pgd_bad(*pgd)) {
+ printk("statm_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
+ pgd_clear(pgd);
+ return;
+ }
+ pmd = pmd_offset(pgd, address);
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ statm_pte_range(pmd, address, end - address, pages, shared, dirty, total);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+}
+
+static void statm_pgd_range(pgd_t * pgd, unsigned long address, unsigned long end,
+ int * pages, int * shared, int * dirty, int * total)
+{
+ while (address < end) {
+ statm_pmd_range(pgd, address, end - address, pages, shared, dirty, total);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ pgd++;
+ }
+}
+
+static unsigned long
+get_wchan (struct task_struct *p)
+{
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+#if defined(__i386__)
+ {
+ unsigned long ebp, eip;
+ unsigned long stack_page;
+ int count = 0;
+
+ stack_page = p->kernel_stack_page;
+ if (!stack_page)
+ return 0;
+ ebp = p->tss.ebp;
+ do {
+ if (ebp < stack_page || ebp >= 4092+stack_page)
+ return 0;
+ eip = *(unsigned long *) (ebp+4);
+ if (eip < (unsigned long) interruptible_sleep_on
+ || eip >= (unsigned long) add_timer)
+ return eip;
+ ebp = *(unsigned long *) ebp;
+ } while (count++ < 16);
+ }
+#elif defined(__alpha__)
+ /*
+ * This one depends on the frame size of schedule(). Do a
+ * "disass schedule" in gdb to find the frame size. Also, the
+ * code assumes that sleep_on() follows immediately after
+ * interruptible_sleep_on() and that add_timer() follows
+ * immediately after interruptible_sleep(). Ugly, isn't it?
+ * Maybe adding a wchan field to task_struct would be better,
+ * after all...
+ */
+ {
+ unsigned long schedule_frame;
+ unsigned long pc;
+
+ pc = thread_saved_pc(&p->tss);
+ if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
+ schedule_frame = ((unsigned long *)p->tss.ksp)[6];
+ return ((unsigned long *)schedule_frame)[12];
+ }
+ return pc;
+ }
+#endif
+ return 0;
+}
+
asmlinkage int
-sys_table (int type, union table *buf)
+sys_table (int type, union table *buf, const void *param)
{
union table tbl;
struct sysinfo i;
+ struct task_struct *tsk = NULL;
+ pid_t pid;
int err;
if (type == TABLE_VERSION)
@@ -41,6 +198,31 @@ sys_table (int type, union table *buf)
if (!buf)
return -EFAULT;
+ memset (&tbl, 0, sizeof (union table));
+
+ /* For TABLE_PROC_*, read pid and get task_struct */
+
+ switch (type) {
+ case TABLE_PROC_UID:
+ case TABLE_PROC_MEM:
+ case TABLE_PROC_SEGMENT:
+ case TABLE_PROC_TIME:
+ case TABLE_PROC_STATE:
+ case TABLE_PROC_SIGNAL:
+ case TABLE_PROC_KERNEL:
+ err = verify_area (VERIFY_READ, param, sizeof (pid_t));
+ if (err)
+ return err;
+ memcpy_fromfs (&pid, param, sizeof (pid_t));
+
+ tsk = get_task (pid);
+ if (tsk == NULL)
+ return -ESRCH;
+ break;
+ }
+
+ /* Main function dispatcher */
+
switch (type) {
case TABLE_CPU:
tbl.cpu.total = jiffies;
@@ -66,9 +248,9 @@ sys_table (int type, union table *buf)
tbl.swap.free = i.freeswap;
break;
case TABLE_LOADAVG:
- tbl.loadavg.loadavg [0] = avenrun [0];
- tbl.loadavg.loadavg [1] = avenrun [1];
- tbl.loadavg.loadavg [2] = avenrun [2];
+ tbl.loadavg.loadavg [0] = (double) avenrun [0] / (1 << FSHIFT);
+ tbl.loadavg.loadavg [1] = (double) avenrun [1] / (1 << FSHIFT);
+ tbl.loadavg.loadavg [2] = (double) avenrun [2] / (1 << FSHIFT);
tbl.loadavg.nr_running = nr_running;
tbl.loadavg.nr_tasks = nr_tasks;
tbl.loadavg.last_pid = last_pid;
@@ -77,6 +259,157 @@ sys_table (int type, union table *buf)
tbl.uptime.uptime = jiffies;
tbl.uptime.idle = task[0]->utime + task[0]->stime;
break;
+ case TABLE_PROC_STATE:
+ tbl.proc_state.state = tsk->state;
+ tbl.proc_state.flags = tsk->flags;
+ memcpy (tbl.proc_state.comm, tsk->comm, sizeof (tbl.proc_state.comm));
+ break;
+ case TABLE_PROC_UID:
+ tbl.proc_uid.uid = tsk->uid;
+ tbl.proc_uid.euid = tsk->euid;
+ tbl.proc_uid.suid = tsk->suid;
+ tbl.proc_uid.fsuid = tsk->fsuid;
+
+ tbl.proc_uid.gid = tsk->gid;
+ tbl.proc_uid.egid = tsk->egid;
+ tbl.proc_uid.sgid = tsk->sgid;
+ tbl.proc_uid.fsgid = tsk->fsgid;
+
+ tbl.proc_uid.pid = tsk->pid;
+ tbl.proc_uid.pgrp = tsk->pgrp;
+ tbl.proc_uid.ppid = tsk->p_pptr->pid;
+
+ tbl.proc_uid.session = tsk->session;
+ tbl.proc_uid.tty = tsk->tty ? kdev_t_to_nr (tsk->tty->device) : 0;
+ tbl.proc_uid.tpgid = tsk->tty ? tsk->tty->pgrp : -1;
+
+ tbl.proc_uid.priority = tsk->priority;
+ tbl.proc_uid.counter = tsk->counter;
+ tbl.proc_uid.def_priority = DEF_PRIORITY;
+ break;
+ case TABLE_PROC_SIGNAL:
+ tbl.proc_signal.signal = tsk->signal;
+ tbl.proc_signal.blocked = tsk->blocked;
+
+ if (tsk->sig) {
+ struct sigaction * action = tsk->sig->action;
+ unsigned long sig_ign = 0, sig_caught = 0;
+ unsigned long bit = 1;
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ switch((unsigned long) action->sa_handler) {
+ case 0:
+ break;
+ case 1:
+ sig_ign |= bit;
+ break;
+ default:
+ sig_caught |= bit;
+ }
+ bit <<= 1;
+ action++;
+ }
+
+ tbl.proc_signal.ignored = sig_ign;
+ tbl.proc_signal.caught = sig_caught;
+ } else {
+ tbl.proc_signal.ignored = 0;
+ tbl.proc_signal.caught = 0;
+ }
+ break;
+ case TABLE_PROC_MEM:
+ if (tsk->mm && tsk->mm != &init_mm) {
+ tbl.proc_mem.context = tsk->mm->context;
+ tbl.proc_mem.start_code = tsk->mm->start_code;
+ tbl.proc_mem.end_code = tsk->mm->end_code;
+	tbl.proc_mem.start_data = tsk->mm->start_data;
+ tbl.proc_mem.end_data = tsk->mm->end_data;
+ tbl.proc_mem.start_brk = tsk->mm->start_brk;
+ tbl.proc_mem.brk = tsk->mm->brk;
+ tbl.proc_mem.start_stack = tsk->mm->start_stack;
+ tbl.proc_mem.start_mmap = tsk->mm->start_mmap;
+ tbl.proc_mem.arg_start = tsk->mm->arg_start;
+ tbl.proc_mem.arg_end = tsk->mm->arg_end;
+ tbl.proc_mem.env_start = tsk->mm->env_start;
+ tbl.proc_mem.env_end = tsk->mm->env_end;
+ tbl.proc_mem.rss = tsk->mm->rss;
+ tbl.proc_mem.total_vm = tsk->mm->total_vm;
+ tbl.proc_mem.locked_vm = tsk->mm->locked_vm;
+ }
+ tbl.proc_mem.rlim = tsk->rlim ? tsk->rlim[RLIMIT_RSS].rlim_cur : 0;
+ break;
+ case TABLE_PROC_SEGMENT:
+ if (tsk->mm && tsk->mm != &init_mm) {
+ unsigned long vsize = 0;
+ int size = 0, resident = 0, share = 0;
+ int trs = 0, lrs = 0, drs = 0, dt = 0;
+ struct vm_area_struct * vma = tsk->mm->mmap;
+
+ while (vma) {
+ pgd_t *pgd = pgd_offset(tsk->mm, vma->vm_start);
+ int pages = 0, shared = 0, dirty = 0, total = 0;
+
+ vsize += vma->vm_end - vma->vm_start;
+
+ statm_pgd_range (pgd, vma->vm_start, vma->vm_end,
+ &pages, &shared, &dirty, &total);
+ resident += pages;
+ share += shared;
+ dt += dirty;
+ size += total;
+ if (vma->vm_flags & VM_EXECUTABLE)
+ trs += pages; /* text */
+ else if (vma->vm_flags & VM_GROWSDOWN)
+ drs += pages; /* stack */
+ else if (vma->vm_end > 0x60000000)
+ lrs += pages; /* library */
+ else
+ drs += pages;
+ vma = vma->vm_next;
+ }
+
+ tbl.proc_segment.vsize = vsize;
+ tbl.proc_segment.size = size;
+ tbl.proc_segment.resident = resident;
+ tbl.proc_segment.shared = share;
+ tbl.proc_segment.trs = trs;
+ tbl.proc_segment.lrs = lrs;
+ tbl.proc_segment.dt = dt;
+ }
+ break;
+ case TABLE_PROC_TIME:
+ tbl.proc_time.utime = tsk->utime;
+ tbl.proc_time.stime = tsk->stime;
+ tbl.proc_time.cutime = tsk->cutime;
+ tbl.proc_time.cstime = tsk->cstime;
+
+ tbl.proc_time.start_time = tsk->start_time;
+ tbl.proc_time.timeout = tsk->timeout;
+ tbl.proc_time.policy = tsk->policy;
+ tbl.proc_time.rt_priority = tsk->rt_priority;
+
+ tbl.proc_time.it_real_value = tsk->it_real_value;
+ tbl.proc_time.it_prof_value = tsk->it_prof_value;
+ tbl.proc_time.it_virt_value = tsk->it_virt_value;
+ tbl.proc_time.it_real_incr = tsk->it_real_incr;
+ tbl.proc_time.it_prof_incr = tsk->it_prof_incr;
+ tbl.proc_time.it_virt_incr = tsk->it_virt_incr;
+ break;
+ case TABLE_PROC_KERNEL:
+ tbl.proc_kernel.min_flt = tsk->min_flt;
+ tbl.proc_kernel.cmin_flt = tsk->cmin_flt;
+ tbl.proc_kernel.maj_flt = tsk->maj_flt;
+ tbl.proc_kernel.cmaj_flt = tsk->cmaj_flt;
+
+	tbl.proc_kernel.kesp = tsk->kernel_stack_page ? KSTK_ESP(tsk) : 0;
+	tbl.proc_kernel.keip = tsk->kernel_stack_page ? KSTK_EIP(tsk) : 0;
+
+ tbl.proc_kernel.nswap = tsk->nswap;
+ tbl.proc_kernel.cnswap = tsk->cnswap;
+
+ tbl.proc_kernel.wchan = get_wchan (tsk);
+ break;
default:
return -EINVAL;
}
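
A note on the loadavg hunk above: avenrun[] holds fixed-point values scaled by 1 << FSHIFT, so dividing by that factor yields the familiar floating-point load averages now stored in struct table_loadavg. Below is an illustrative stand-alone sketch of the same conversion; FSHIFT = 11 is assumed here (the value used by kernels of this generation), and the raw sample is made up.

#include <stdio.h>

#define FSHIFT	11			/* assumed kernel fixed-point shift */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point == 2048 */

int
main (void)
{
	unsigned long raw = 1331;	/* hypothetical avenrun[0] sample */
	double load = (double) raw / FIXED_1;

	printf ("raw %lu -> load %.2f\n", raw, load);	/* prints 0.65 */
	return 0;
}
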
diff --git a/kernel/table.h b/kernel/table.h
index 9515fdc9..801b410b 100644
--- a/kernel/table.h
+++ b/kernel/table.h
@@ -1,13 +1,23 @@
#ifndef _LINUX_TABLE_H
#define _LINUX_TABLE_H
-#include <linux/types.h>
-#define TABLE_VERSION 0
-#define TABLE_CPU 1
-#define TABLE_MEM 2
-#define TABLE_SWAP 3
-#define TABLE_LOADAVG 4
-#define TABLE_UPTIME 5
+#ifdef __KERNEL__
+#include <linux/types.h>
+#endif
+
+#define TABLE_VERSION 0
+#define TABLE_CPU 1
+#define TABLE_MEM 2
+#define TABLE_SWAP 3
+#define TABLE_LOADAVG 4
+#define TABLE_UPTIME 5
+#define TABLE_PROC_UID 6
+#define TABLE_PROC_MEM 7
+#define TABLE_PROC_SEGMENT 8
+#define TABLE_PROC_TIME 9
+#define TABLE_PROC_STATE 10
+#define TABLE_PROC_SIGNAL 11
+#define TABLE_PROC_KERNEL 12
/* CPU Usage (in jiffies = 1/100th seconds) */
@@ -46,7 +56,7 @@ struct table_swap
struct table_loadavg
{
- unsigned long loadavg [3];
+ double loadavg [3];
unsigned nr_running;
unsigned nr_tasks;
unsigned last_pid;
@@ -60,6 +70,67 @@ struct table_uptime
unsigned long idle;
};
+/* Information about processes. */
+
+struct table_proc_state
+{
+ long state;
+ unsigned long flags;
+ char comm[16];
+};
+
+struct table_proc_uid
+{
+ int uid, euid, suid, fsuid;
+ int gid, egid, sgid, fsgid;
+ int pid, pgrp, ppid;
+ int session;
+ unsigned int tty;
+ int tpgid;
+ long priority;
+ long counter;
+ long def_priority;
+};
+
+struct table_proc_mem
+{
+ unsigned long context;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack, start_mmap;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, rlim, total_vm, locked_vm;
+};
+
+struct table_proc_segment
+{
+ unsigned long vsize;
+ int size, resident, shared;
+ int trs, lrs, drs, dt;
+};
+
+struct table_proc_time
+{
+ long utime, stime, cutime, cstime, start_time;
+ unsigned long timeout, policy, rt_priority;
+ unsigned long it_real_value, it_prof_value, it_virt_value;
+ unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+};
+
+struct table_proc_signal
+{
+ unsigned long signal;
+ unsigned long blocked; /* bitmap of masked signals */
+ unsigned long ignored; /* mask of ignored signals */
+ unsigned long caught; /* mask of caught signals */
+};
+
+struct table_proc_kernel
+{
+ unsigned long keip, kesp, wchan;
+ unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long nswap, cnswap;
+};
+
/* Union */
union table
@@ -69,12 +140,15 @@ union table
struct table_swap swap;
struct table_loadavg loadavg;
struct table_uptime uptime;
+ struct table_proc_uid proc_uid;
+ struct table_proc_mem proc_mem;
+ struct table_proc_segment proc_segment;
+ struct table_proc_time proc_time;
+ struct table_proc_state proc_state;
+ struct table_proc_signal proc_signal;
+ struct table_proc_kernel proc_kernel;
};
-#ifdef __KERNEL__
-
-#endif /* __KERNEL__ */
-
#endif /* _LINUX_TABLE_H */
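
For the new struct table_proc_signal above, the ignored and caught members are bit masks that sys_table() builds from sig->action, with bit 0 corresponding to signal 1. The following user-space sketch shows how such a mask can be decoded; the filled-in union is faked here with a sample value rather than fetched via a real TABLE_PROC_SIGNAL call.

#include <stdio.h>
#include <signal.h>

#include "table.h"

/* Non-zero if signal signum is marked ignored in a filled-in
 * table_proc_signal; bit 0 corresponds to signal 1, matching the
 * loop in sys_table(). */
static int
signal_is_ignored (const struct table_proc_signal *sig, int signum)
{
	return (sig->ignored >> (signum - 1)) & 1;
}

int
main (void)
{
	union table tbl;

	/* Normally filled by table (TABLE_PROC_SIGNAL, &tbl, &pid);
	 * a fabricated sample value is used here for illustration. */
	tbl.proc_signal.ignored = 1UL << (SIGCHLD - 1);

	printf ("SIGCHLD ignored: %s\n",
		signal_is_ignored (&tbl.proc_signal, SIGCHLD) ? "yes" : "no");
	return 0;
}
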
diff --git a/kernel/test.c b/kernel/test.c
index a6ba8dc7..f4ed092c 100644
--- a/kernel/test.c
+++ b/kernel/test.c
@@ -7,15 +7,16 @@
#include <syscall.h>
-static inline _syscall2 (int, table, int, type, union table *, tbl);
+static inline _syscall3 (int, table, int, type, union table *, tbl, const void *, param);
int
main (void)
{
union table tbl;
+ unsigned count;
int ret;
- ret = table (TABLE_VERSION, NULL);
+ ret = table (TABLE_VERSION, NULL, NULL);
if (ret == -1) {
fprintf (stderr, "table(%u): %s\n", TABLE_VERSION, sys_errlist [errno]);
@@ -24,18 +25,20 @@ main (void)
fprintf (stderr, "Table (%u) = %u\n", TABLE_VERSION, ret);
- ret = table (TABLE_CPU, &tbl);
+ for (count = 0; count < 5; count++) {
+ ret = table (TABLE_CPU, &tbl, NULL);
- if (ret == -1) {
- fprintf (stderr, "table(%u): %s\n", TABLE_CPU, sys_errlist [errno]);
- exit (-errno);
- }
+ if (ret == -1) {
+ fprintf (stderr, "table(%u): %s\n", TABLE_CPU, sys_errlist [errno]);
+ exit (-errno);
+ }
- fprintf (stderr, "Table (%u) = %lu, %lu, %lu, %lu, %lu, %lu\n",
- TABLE_CPU, tbl.cpu.total, tbl.cpu.user, tbl.cpu.nice,
- tbl.cpu.sys, tbl.cpu.idle, tbl.cpu.frequency);
+ fprintf (stderr, "Table (%u) = %lu, %lu, %lu, %lu, %lu, %lu\n",
+ TABLE_CPU, tbl.cpu.total, tbl.cpu.user, tbl.cpu.nice,
+ tbl.cpu.sys, tbl.cpu.idle, tbl.cpu.frequency);
+ }
- ret = table (TABLE_MEM, &tbl);
+ ret = table (TABLE_MEM, &tbl, NULL);
if (ret == -1) {
fprintf (stderr, "table(%u): %s\n", TABLE_MEM, sys_errlist [errno]);
@@ -46,7 +49,7 @@ main (void)
TABLE_MEM, tbl.mem.total, tbl.mem.used, tbl.mem.free,
tbl.mem.shared, tbl.mem.buffer, tbl.mem.cached);
- ret = table (TABLE_SWAP, &tbl);
+ ret = table (TABLE_SWAP, &tbl, NULL);
if (ret == -1) {
fprintf (stderr, "table(%u): %s\n", TABLE_SWAP, sys_errlist [errno]);
@@ -56,7 +59,7 @@ main (void)
fprintf (stderr, "Table (%u) = %lu, %lu, %lu\n",
TABLE_SWAP, tbl.swap.total, tbl.swap.used, tbl.swap.free);
- ret = table (TABLE_LOADAVG, &tbl);
+ ret = table (TABLE_LOADAVG, &tbl, NULL);
if (ret == -1) {
fprintf (stderr, "table(%u): %s\n", TABLE_LOADAVG, sys_errlist [errno]);
@@ -68,7 +71,7 @@ main (void)
tbl.loadavg.loadavg [2], tbl.loadavg.nr_running,
tbl.loadavg.nr_tasks, tbl.loadavg.last_pid);
- ret = table (TABLE_UPTIME, &tbl);
+ ret = table (TABLE_UPTIME, &tbl, NULL);
if (ret == -1) {
fprintf (stderr, "table(%u): %s\n", TABLE_UPTIME, sys_errlist [errno]);