#ifndef _LINUX_SHM_H_
#define _LINUX_SHM_H_
#include <linux/ipc.h>
#include <linux/errno.h>
#ifdef __KERNEL__
#include <asm/page.h>
#else
#include <unistd.h>
#endif
/*
 * SHMMAX, SHMMNI and SHMALL are default upper limits which can be
 * increased by sysctl.
 */
#define SHMMAX 0x2000000 /* max shared seg size (bytes) */
#define SHMMIN 1 /* min shared seg size (bytes) */
#define SHMMNI 4096 /* max num of segs system wide */
#ifdef __KERNEL__
#define SHMALL (SHMMAX/PAGE_SIZE*(SHMMNI/16)) /* max shm system wide (pages) */
#else
#define SHMALL (SHMMAX/getpagesize()*(SHMMNI/16))
#endif
#define SHMSEG SHMMNI /* max shared segs per process */
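/*
 * Worked example of the defaults above (a sketch assuming a 4 KiB
 * PAGE_SIZE, which not every architecture uses): SHMMAX is 0x2000000
 * bytes = 32 MiB, so SHMALL comes out to (32 MiB / 4 KiB) * (4096 / 16)
 * = 8192 * 256 = 2097152 pages, i.e. 8 GiB of System V shared memory
 * system wide. Both values can be raised at runtime through
 * /proc/sys/kernel/shmmax and /proc/sys/kernel/shmall.
 */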
#ifdef __KERNEL__
#include <asm/shmparam.h>
#endif
/* Obsolete, used only for backwards compatibility and libc5 compiles */
struct shmid_ds {
	struct ipc_perm		shm_perm;	/* operation perms */
	int			shm_segsz;	/* size of segment (bytes) */
	__kernel_time_t		shm_atime;	/* last attach time */
	__kernel_time_t		shm_dtime;	/* last detach time */
	__kernel_time_t		shm_ctime;	/* last change time */
	__kernel_ipc_pid_t	shm_cpid;	/* pid of creator */
	__kernel_ipc_pid_t	shm_lpid;	/* pid of last operator */
	unsigned short		shm_nattch;	/* no. of current attaches */
	unsigned short		shm_unused;	/* compatibility */
	void			*shm_unused2;	/* ditto - used by DIPC */
	void			*shm_unused3;	/* unused */
};
/* Include the definition of shmid64_ds and shminfo64 */
#include <asm/shmbuf.h>
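/*
 * Userspace note (illustrative sketch, not part of this header): a C
 * library's struct shmid_ds normally follows the 64-bit shmid64_ds
 * layout included above, and is filled in with IPC_STAT from
 * <linux/ipc.h>, e.g.:
 *
 *	struct shmid_ds buf;
 *	if (shmctl(id, IPC_STAT, &buf) == 0)
 *		printf("attaches: %lu\n", (unsigned long)buf.shm_nattch);
 */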
/* permission flag for shmget */
#define SHM_R 0400 /* or S_IRUGO from <linux/stat.h> */
#define SHM_W 0200 /* or S_IWUGO from <linux/stat.h> */
/* mode for attach */
#define SHM_RDONLY 010000 /* read-only access */
#define SHM_RND 020000 /* round attach address to SHMLBA boundary */
#define SHM_REMAP 040000 /* take-over region on attach */
#define SHM_EXEC 0100000 /* execution access */
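/*
 * Usage sketch for the flags above (illustrative, not part of this
 * header; shmget() and shmat() are the usual <sys/shm.h> wrappers).
 * SHM_RND would only matter if a non-NULL attach address were passed:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | SHM_R | SHM_W);
 *	void *p = shmat(id, NULL, SHM_RDONLY);
 *	if (p == (void *)-1)
 *		perror("shmat");
 */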
/* super user shmctl commands */
#define SHM_LOCK 11
#define SHM_UNLOCK 12
/* ipcs ctl commands */
#define SHM_STAT 13
#define SHM_INFO 14
/* Obsolete, used only for backwards compatibility */
struct shminfo {
	int shmmax;
	int shmmin;
	int shmmni;
	int shmseg;
	int shmall;
};

struct shm_info {
	int used_ids;
	unsigned long shm_tot;	/* total allocated shm */
	unsigned long shm_rss;	/* total resident shm */
	unsigned long shm_swp;	/* total swapped shm */
	unsigned long swap_attempts;
	unsigned long swap_successes;
};
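/*
 * This is the structure ipcs-style tools read back (illustrative
 * sketch, not part of this header): SHM_INFO is passed to shmctl(),
 * the struct shm_info pointer is cast through the struct shmid_ds *
 * expected by the prototype, and the return value is the index of the
 * highest used segment slot:
 *
 *	struct shm_info info;
 *	int maxidx = shmctl(0, SHM_INFO, (struct shmid_ds *)&info);
 *	if (maxidx >= 0)
 *		printf("resident pages: %lu\n", info.shm_rss);
 */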
#ifdef __KERNEL__
struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;
	struct file		*shm_file;
	unsigned long		shm_nattch;
	unsigned long		shm_segsz;
	time_t			shm_atim;
	time_t			shm_dtim;
	time_t			shm_ctim;
	pid_t			shm_cprid;
	pid_t			shm_lprid;
	struct user_struct	*mlock_user;

	/* The task that created the shm object; NULL if the task has exited. */
	struct task_struct	*shm_creator;
};
/* shm_mode upper byte flags */
#define SHM_DEST 01000 /* segment will be destroyed on last detach */
#define SHM_LOCKED 02000 /* segment will not be swapped */
#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
#define SHM_NORESERVE 010000 /* don't check for reservations */
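/*
 * SHM_HUGETLB and SHM_NORESERVE are also accepted in the shmflg
 * argument of shmget() from userspace, which mirrors the same octal
 * values. Illustrative sketch (assumes hugepages are configured and,
 * for unprivileged callers, membership in the group named by
 * /proc/sys/vm/hugetlb_shm_group):
 *
 *	int id = shmget(IPC_PRIVATE, 8 << 20,
 *			IPC_CREAT | SHM_HUGETLB | SHM_R | SHM_W);
 */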
#ifdef CONFIG_SYSVIPC
long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr,
	      unsigned long shmlba);
extern int is_file_shm_hugepages(struct file *file);
extern void exit_shm(struct task_struct *task);
#else
static inline long do_shmat(int shmid, char __user *shmaddr,
			    int shmflg, unsigned long *addr,
			    unsigned long shmlba)
{
	return -ENOSYS;
}
static inline int is_file_shm_hugepages(struct file *file)
{
	return 0;
}
static inline void exit_shm(struct task_struct *task)
{
}
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_SHM_H_ */