/*-------------------------------------------------------------------------
 *
 * spin.c
 *	   Hardware-independent implementation of spinlocks.
 *
 *
 * For machines that have test-and-set (TAS) instructions, s_lock.h/.c
 * define the spinlock implementation.	This file contains only a stub
 * implementation for spinlocks using SysV semaphores.	The semaphore method
 * is too slow to be very useful :-(
 *
 *
 * Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/storage/lmgr/spin.c,v 1.5 2001/10/25 05:49:42 momjian Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <errno.h>

#include "storage/ipc.h"
/* In Ultrix, sem.h and shm.h must be included AFTER ipc.h */
#ifdef HAVE_SYS_SEM_H
#include <sys/sem.h>
#endif

#if defined(__darwin__)
#include "port/darwin/sem.h"
#endif

#include "storage/lwlock.h"
#include "storage/proc.h"
#include "storage/spin.h"


#ifdef HAS_TEST_AND_SET

/*
 * CreateSpinlocks --- create and initialize spinlocks during startup
 */
void
CreateSpinlocks(void)
{
	/* no-op when we have TAS spinlocks */
}

#else							/* !HAS_TEST_AND_SET */

/*
 * No TAS, so spinlocks are implemented using SysV semaphores.
 *
 * Typedef slock_t stores the semId and sem number of the sema to use.
 * The semas needed are created by CreateSpinlocks and doled out by
 * s_init_lock_sema.
 *
 * Since many systems have a rather small SEMMSL limit on semas per set,
 * we allocate the semaphores required in sets of SPINLOCKS_PER_SET semas.
 * This value is deliberately made equal to PROC_NSEMS_PER_SET so that all
 * sema sets allocated by Postgres will be the same size; that eases the
 * semaphore-recycling logic in IpcSemaphoreCreate().
 *
 * Note that the SpinLockIds array is not in shared memory; it is filled
 * by the postmaster and then inherited through fork() by backends.  This
 * is OK because its contents do not change after shmem initialization.
 */
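
/*
 * Example of the mapping (illustrative numbers only): if SPINLOCKS_PER_SET
 * were 16, global spinlock number 37 would live in the third sema set,
 * SpinLockIds[2] (37 / 16), as sem number 5 within that set (37 % 16);
 * this is exactly the arithmetic done by s_init_lock_sema() below.
 */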

#define SPINLOCKS_PER_SET  PROC_NSEMS_PER_SET

static IpcSemaphoreId *SpinLockIds = NULL;

static int	numSpinSets = 0;	/* number of sema sets used */
static int	numSpinLocks = 0;	/* total number of semas allocated */
static int	nextSpinLock = 0;	/* next free spinlock index */

static void SpinFreeAllSemaphores(void);


/*
 * CreateSpinlocks --- create and initialize spinlocks during startup
 */
void
CreateSpinlocks(void)
{
	int			i;

	if (SpinLockIds == NULL)
	{
		/*
		 * Compute number of spinlocks needed.	It would be cleaner to
		 * distribute this logic into the affected modules, similar to the
		 * way shmem space estimation is handled.
		 *
		 * For now, though, we just need a few spinlocks (10 should be
		 * plenty) plus one for each LWLock.
		 */
		numSpinLocks = NumLWLocks() + 10;

		/* might as well round up to a multiple of SPINLOCKS_PER_SET */
		numSpinSets = (numSpinLocks - 1) / SPINLOCKS_PER_SET + 1;
		numSpinLocks = numSpinSets * SPINLOCKS_PER_SET;
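		/*
		 * Worked example with illustrative numbers: if NumLWLocks() returned
		 * 53, numSpinLocks starts out as 63; with SPINLOCKS_PER_SET at 16
		 * that gives numSpinSets = 4 and numSpinLocks rounded up to 64.
		 */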

		SpinLockIds = (IpcSemaphoreId *)
			malloc(numSpinSets * sizeof(IpcSemaphoreId));
		Assert(SpinLockIds != NULL);
	}

	for (i = 0; i < numSpinSets; i++)
		SpinLockIds[i] = -1;

	/*
	 * Arrange to delete semas on exit --- set this up now so that we will
	 * clean up if allocation fails.  We use our own freeproc, rather than
	 * IpcSemaphoreCreate's removeOnExit option, because we don't want to
	 * fill up the on_shmem_exit list with a separate entry for each
	 * semaphore set.
	 */
	on_shmem_exit(SpinFreeAllSemaphores, 0);

	/* Create sema sets and set all semas to count 1 */
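	/*
	 * The IpcSemaphoreCreate() arguments, as suggested by the surrounding
	 * comments: semas per set, permission bits, initial value of each sema
	 * (1 = unlocked), and removeOnExit (false here, because
	 * SpinFreeAllSemaphores is registered as the cleanup routine instead).
	 */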
	for (i = 0; i < numSpinSets; i++)
	{
		SpinLockIds[i] = IpcSemaphoreCreate(SPINLOCKS_PER_SET,
											IPCProtection,
											1,
											false);
	}

	/* Init counter for allocating dynamic spinlocks */
	nextSpinLock = 0;
}

/*
 * SpinFreeAllSemaphores -
 *	  called at shmem_exit time, ie when exiting the postmaster or
 *	  destroying shared state for a failed set of backends.
 *	  Free up all the semaphores allocated for spinlocks.
 */
static void
SpinFreeAllSemaphores(void)
{
	int			i;

	for (i = 0; i < numSpinSets; i++)
	{
		if (SpinLockIds[i] >= 0)
			IpcSemaphoreKill(SpinLockIds[i]);
	}
	free(SpinLockIds);
	SpinLockIds = NULL;
}

/*
 * s_lock.h hardware-spinlock emulation
 */
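/*
 * These four routines stand in for the hardware TAS primitives: when
 * HAS_TEST_AND_SET is not defined, s_lock.h is expected to map its
 * S_INIT_LOCK(), S_UNLOCK(), S_LOCK_FREE(), and TAS() macros onto
 * s_init_lock_sema(), s_unlock_sema(), s_lock_free_sema(), and tas_sema()
 * respectively (macro names per s_lock.h of this vintage).
 */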

void
s_init_lock_sema(volatile slock_t *lock)
{
	if (nextSpinLock >= numSpinLocks)
		elog(FATAL, "s_init_lock_sema: not enough semaphores");
	lock->semId = SpinLockIds[nextSpinLock / SPINLOCKS_PER_SET];
	lock->sem = nextSpinLock % SPINLOCKS_PER_SET;
	nextSpinLock++;
}

void
s_unlock_sema(volatile slock_t *lock)
{
	IpcSemaphoreUnlock(lock->semId, lock->sem);
}

bool
s_lock_free_sema(volatile slock_t *lock)
{
	return IpcSemaphoreGetValue(lock->semId, lock->sem) > 0;
}

int
tas_sema(volatile slock_t *lock)
{
	/* Note that TAS macros return 0 if *success* */
	return !IpcSemaphoreTryLock(lock->semId, lock->sem);
}
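
/*
 * Since this follows the zero-on-success convention of the hardware TAS
 * macros, a caller (presumably the spin loop in s_lock.c) acquires the lock
 * along the lines of:
 *
 *		while (tas_sema(lock))
 *			;					(back off, then retry)
 *
 * This is a sketch only; the real retry/backoff logic lives in s_lock.c.
 */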
#endif	 /* !HAS_TEST_AND_SET */