/*****************************************************************************

Copyright (c) 1996, 2022, Oracle and/or its affiliates.
Copyright (c) 2017, 2022, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/lock0lock.h
The transaction lock system

Created 5/7/1996 Heikki Tuuri
*******************************************************/

#ifndef lock0lock_h
#define lock0lock_h

#include "buf0types.h"
#include "trx0trx.h"
#include "mtr0types.h"
#include "rem0types.h"
#include "hash0hash.h"
#include "srv0srv.h"
#include "ut0vec.h"
#include "gis0rtree.h"
#include "lock0prdt.h"
#include "transactional_lock_guard.h"

// Forward declaration
class ReadView;

/** The value of innodb_deadlock_detect */
extern my_bool innodb_deadlock_detect;
/** The value of innodb_deadlock_report */
extern ulong innodb_deadlock_report;

namespace Deadlock
{
  /** The allowed values of innodb_deadlock_report */
  enum report { REPORT_OFF, REPORT_BASIC, REPORT_FULL };
}

/*********************************************************************//**
Gets the heap_no of the smallest user record on a page.
@return heap_no of smallest user record, or PAGE_HEAP_NO_SUPREMUM */
UNIV_INLINE
ulint
lock_get_min_heap_no(
/*=================*/
	const buf_block_t*	block);	/*!< in: buffer block */

/** Discard locks for an index when purging DELETE FROM SYS_INDEXES
after an aborted CREATE INDEX operation.
@param index   a stale index on which ADD INDEX operation was aborted */
ATTRIBUTE_COLD void lock_discard_for_index(const dict_index_t &index);

/*************************************************************//**
Updates the lock table when we have reorganized a page. NOTE: we also
copy the locks set on the infimum of the page; the infimum may carry
locks if an update of a record is occurring on the page, and its locks
were temporarily stored on the infimum. */
void
lock_move_reorganize_page(
/*======================*/
	const buf_block_t*	block,	/*!< in: old index page, now
					reorganized */
	const buf_block_t*	oblock);/*!< in: copy of the old, not
					reorganized page */
/*************************************************************//**
Moves the explicit locks on user records to another page if a record
list end is moved to another page. */
void
lock_move_rec_list_end(
/*===================*/
	const buf_block_t*	new_block,	/*!< in: index page to move to */
	const buf_block_t*	block,		/*!< in: index page */
	const rec_t*		rec);		/*!< in: record on page: this
						is the first record moved */
/*************************************************************//**
Moves the explicit locks on user records to another page if a record
list start is moved to another page. */
void
lock_move_rec_list_start(
/*=====================*/
	const buf_block_t*	new_block,	/*!< in: index page to move to */
	const buf_block_t*	block,		/*!< in: index page */
	const rec_t*		rec,		/*!< in: record on page:
						this is the first
						record NOT copied */
	const rec_t*		old_end);	/*!< in: old
						previous-to-last
						record on new_page
						before the records
						were copied */
/*************************************************************//**
Updates the lock table when a page is split to the right. */
void
lock_update_split_right(
/*====================*/
	const buf_block_t*	right_block,	/*!< in: right page */
	const buf_block_t*	left_block);	/*!< in: left page */
/*************************************************************//**
Updates the lock table when a page is merged to the right. */
void
lock_update_merge_right(
/*====================*/
	const buf_block_t*	right_block,	/*!< in: right page to
						which merged */
	const rec_t*		orig_succ,	/*!< in: original
						successor of infimum
						on the right page
						before merge */
	const buf_block_t*	left_block);	/*!< in: merged index
						page which will be
						discarded */
/** Update locks when the root page is copied to another in
btr_root_raise_and_insert(). Note that we leave lock structs on the
root page, even though they do not make sense on non-leaf pages:
the reason is that in a pessimistic update the infimum record
of the root page will act as a dummy carrier of the locks of the record
to be updated. */
void lock_update_root_raise(const buf_block_t &block, const page_id_t root);
/** Update the lock table when a page is copied to another.
@param new_block  the target page
@param old        old page (not index root page) */
void lock_update_copy_and_discard(const buf_block_t &new_block, page_id_t old);

/** Update gap locks between the last record of the left_block and the
first record of the right_block when a record is about to be inserted
at the start of the right_block, even though it should "naturally" be
inserted as the last record of the left_block according to the
current node pointer in the parent page.

That is, we assume that the lowest common ancestor of the left_block
and right_block routes the key of the new record to the left_block,
but a heuristic which tries to avoid overflowing left_block has chosen
to insert the record into right_block instead. Said ancestor performs
this routing by comparing the key of the record to a "split point" -
all records greater than or equal to the split point (node pointer)
are in right_block, and smaller ones in left_block.
The split point may be smaller than the smallest key in right_block.

The gap between the last record on the left_block and the first record
on the right_block is represented as a gap lock attached to the supremum
pseudo-record of left_block, and a gap lock attached to the new first
record of right_block.

Thus, inserting the new record, and subsequently adjusting the node
pointers in parent pages to values smaller than or equal to the new
record's key, means that the gap will be sliced at a different place
("moved to the left"): a fragment of the 1st gap will now be treated as
part of the 2nd. Therefore, we must copy any GRANTED locks from the 1st
gap to the 2nd gap. Any WAITING locks must be of INSERT_INTENTION type
(as no other GAP locks ever wait for anything) and can stay at the 1st
gap, as their only purpose is to notify the requester that it can retry
insertion, and there is no correctness requirement to avoid waking them
up too soon.
@param left_block   left page
@param right_block  right page */
void lock_update_node_pointer(const buf_block_t *left_block,
                              const buf_block_t *right_block);
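
/* An illustrative example (with hypothetical keys): suppose left_block
holds {10, 20}, right_block holds {40, 50}, and the node pointer (split
point) is 30. Inserting key 25 into right_block and lowering the node
pointer to 25 slices the old gap (20, 40) differently: (20, 25) remains
with the supremum of left_block, while (25, 40) now belongs to the new
first record 25 of right_block, so any GRANTED gap locks covering the
old gap must be copied to record 25. */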
/*************************************************************//**
Updates the lock table when a page is split to the left. */
void
lock_update_split_left(
/*===================*/
	const buf_block_t*	right_block,	/*!< in: right page */
	const buf_block_t*	left_block);	/*!< in: left page */
/** Update the lock table when a page is merged to the left.
@param left      left page
@param orig_pred original predecessor of supremum on the left page before merge
@param right     merged, to-be-discarded right page */
void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred,
                            const page_id_t right);

/** Update the locks when a page is split and merged to two pages,
in defragmentation. */
void lock_update_split_and_merge(
	const buf_block_t* left_block,	/*!< in: left page to which merged */
	const rec_t* orig_pred,		/*!< in: original predecessor of
					supremum on the left page before merge*/
	const buf_block_t* right_block);/*!< in: right page from which merged */
/*************************************************************//**
Resets the original locks on heir and replaces them with gap type locks
inherited from rec. */
void
lock_rec_reset_and_inherit_gap_locks(
/*=================================*/
	const buf_block_t&	heir_block,	/*!< in: block containing the
						record which inherits */
	const page_id_t		donor,		/*!< in: page containing the
						record from which inherited;
						does NOT reset the locks on
						this record */
	ulint			heir_heap_no,	/*!< in: heap_no of the
						inheriting record */
	ulint			heap_no);	/*!< in: heap_no of the
						donating record */
/*************************************************************//**
Updates the lock table when a page is discarded. */
void
lock_update_discard(
/*================*/
	const buf_block_t*	heir_block,	/*!< in: index page
						which will inherit the locks */
	ulint			heir_heap_no,	/*!< in: heap_no of the record
						which will inherit the locks */
	const buf_block_t*	block);		/*!< in: index page
						which will be discarded */
/*************************************************************//**
Updates the lock table when a new user record is inserted. */
void
lock_update_insert(
/*===============*/
	const buf_block_t*	block,	/*!< in: buffer block containing rec */
	const rec_t*		rec);	/*!< in: the inserted record */
/*************************************************************//**
Updates the lock table when a record is removed. */
void
lock_update_delete(
/*===============*/
	const buf_block_t*	block,	/*!< in: buffer block containing rec */
	const rec_t*		rec);	/*!< in: the record to be removed */
/*********************************************************************//**
Stores on the page infimum record the explicit locks of another record.
This function is used to store the lock state of a record when it is
updated and the size of the record changes in the update. In such an
update the record is moved, perhaps to another page. The infimum record
acts as a dummy carrier record, taking care of lock releases while the
actual record is being moved. */
void
lock_rec_store_on_page_infimum(
/*===========================*/
	const buf_block_t*	block,	/*!< in: buffer block containing rec */
	const rec_t*		rec);	/*!< in: record whose lock state
					is stored on the infimum
					record of the same page; lock
					bits are reset on the
					record */
/** Restore the explicit lock requests on a single record, where the
state was stored on the infimum of a page.
@param block   buffer block containing rec
@param rec     record whose lock state is restored
@param donator page (rec is not necessarily on this page)
whose infimum stored the lock state; lock bits are reset on the infimum */
void lock_rec_restore_from_page_infimum(const buf_block_t &block,
					const rec_t *rec, page_id_t donator);
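
/* A minimal pairing sketch (illustrative only; the surrounding update
code and new_block/new_rec are assumed, not part of this API):

     lock_rec_store_on_page_infimum(block, rec);  // park the lock state
     ... delete rec and re-insert it, possibly on another page ...
     lock_rec_restore_from_page_infimum(*new_block, new_rec,
                                        block->page.id());
*/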

/**
Create a table lock, without checking for deadlocks or lock compatibility.
@param table      table on which the lock is created
@param type_mode  lock type and mode
@param trx        transaction
@param c_lock     conflicting lock
@return the created lock object */
lock_t *lock_table_create(dict_table_t *table, unsigned type_mode, trx_t *trx,
                          lock_t *c_lock= nullptr);

/*********************************************************************//**
Checks if locks of other transactions prevent an immediate insert of
a record. If they do, first tests if the query thread should anyway
be suspended for some reason; if not, then puts the transaction and
the query thread to the lock wait state and inserts a waiting request
for a gap x-lock to the lock queue.
@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */
dberr_t
lock_rec_insert_check_and_lock(
/*===========================*/
	const rec_t*	rec,	/*!< in: record after which to insert */
	buf_block_t*	block,	/*!< in/out: buffer block of rec */
	dict_index_t*	index,	/*!< in: index */
	que_thr_t*	thr,	/*!< in: query thread */
	mtr_t*		mtr,	/*!< in/out: mini-transaction */
	bool*		inherit)/*!< out: set to true if the newly
				inserted record might need to inherit
				LOCK_GAP type locks from the successor
				record */
	MY_ATTRIBUTE((warn_unused_result));

/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify (update,
delete mark, or delete unmark) of a clustered index record. If they do,
first tests if the query thread should anyway be suspended for some
reason; if not, then puts the transaction and the query thread to the
lock wait state and inserts a waiting request for a record x-lock to the
lock queue.
@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */
dberr_t
lock_clust_rec_modify_check_and_lock(
/*=================================*/
	const buf_block_t*	block,	/*!< in: buffer block of rec */
	const rec_t*		rec,	/*!< in: record which should be
					modified */
	dict_index_t*		index,	/*!< in: clustered index */
	const rec_offs*		offsets,/*!< in: rec_get_offsets(rec, index) */
	que_thr_t*		thr)	/*!< in: query thread */
	MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify
(delete mark or delete unmark) of a secondary index record.
@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */
dberr_t
lock_sec_rec_modify_check_and_lock(
/*===============================*/
	ulint		flags,	/*!< in: if BTR_NO_LOCKING_FLAG
				bit is set, does nothing */
	buf_block_t*	block,	/*!< in/out: buffer block of rec */
	const rec_t*	rec,	/*!< in: record which should be
				modified; NOTE: as this is a secondary
				index, we always have to modify the
				clustered index record first: see the
				comment below */
	dict_index_t*	index,	/*!< in: secondary index */
	que_thr_t*	thr,	/*!< in: query thread
				(can be NULL if BTR_NO_LOCKING_FLAG) */
	mtr_t*		mtr)	/*!< in/out: mini-transaction */
	MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Like lock_clust_rec_read_check_and_lock(), but reads a
secondary index record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or DB_DEADLOCK */
dberr_t
lock_sec_rec_read_check_and_lock(
/*=============================*/
	ulint			flags,	/*!< in: if BTR_NO_LOCKING_FLAG
					bit is set, does nothing */
	const buf_block_t*	block,	/*!< in: buffer block of rec */
	const rec_t*		rec,	/*!< in: user record or page
					supremum record which should
					be read or passed over by a
					read cursor */
	dict_index_t*		index,	/*!< in: secondary index */
	const rec_offs*		offsets,/*!< in: rec_get_offsets(rec, index) */
	lock_mode		mode,	/*!< in: mode of the lock which
					the read cursor should set on
					records: LOCK_S or LOCK_X; the
					latter is possible in
					SELECT FOR UPDATE */
	unsigned		gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
					LOCK_REC_NOT_GAP */
	que_thr_t*		thr);	/*!< in: query thread */
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate read, or passing
over by a read cursor, of a clustered index record. If they do, first tests
if the query thread should anyway be suspended for some reason; if not, then
puts the transaction and the query thread to the lock wait state and inserts a
waiting request for a record lock to the lock queue. Sets the requested mode
lock on the record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or DB_DEADLOCK */
dberr_t
lock_clust_rec_read_check_and_lock(
/*===============================*/
	ulint			flags,	/*!< in: if BTR_NO_LOCKING_FLAG
					bit is set, does nothing */
	const buf_block_t*	block,	/*!< in: buffer block of rec */
	const rec_t*		rec,	/*!< in: user record or page
					supremum record which should
					be read or passed over by a
					read cursor */
	dict_index_t*		index,	/*!< in: clustered index */
	const rec_offs*		offsets,/*!< in: rec_get_offsets(rec, index) */
	lock_mode		mode,	/*!< in: mode of the lock which
					the read cursor should set on
					records: LOCK_S or LOCK_X; the
					latter is possible in
					SELECT FOR UPDATE */
	unsigned		gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
					LOCK_REC_NOT_GAP */
	que_thr_t*		thr);	/*!< in: query thread */
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate read, or passing
over by a read cursor, of a clustered index record. If they do, first tests
if the query thread should anyway be suspended for some reason; if not, then
puts the transaction and the query thread to the lock wait state and inserts a
waiting request for a record lock to the lock queue. Sets the requested mode
lock on the record. This is an alternative version of
lock_clust_rec_read_check_and_lock() that does not require the parameter
"offsets".
@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */
dberr_t
lock_clust_rec_read_check_and_lock_alt(
/*===================================*/
	ulint			flags,	/*!< in: if BTR_NO_LOCKING_FLAG
					bit is set, does nothing */
	const buf_block_t*	block,	/*!< in: buffer block of rec */
	const rec_t*		rec,	/*!< in: user record or page
					supremum record which should
					be read or passed over by a
					read cursor */
	dict_index_t*		index,	/*!< in: clustered index */
	lock_mode		mode,	/*!< in: mode of the lock which
					the read cursor should set on
					records: LOCK_S or LOCK_X; the
					latter is possible in
					SELECT FOR UPDATE */
	unsigned		gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
					LOCK_REC_NOT_GAP */
	que_thr_t*		thr)	/*!< in: query thread */
	MY_ATTRIBUTE((warn_unused_result));

/** Acquire a table lock.
@param table   table to be locked
@param fktable pointer to table, in case of a FOREIGN key check
@param mode    lock mode
@param thr     SQL execution thread
@retval DB_SUCCESS    if the lock was acquired
@retval DB_DEADLOCK   if a deadlock occurred, or fktable && *fktable != table
@retval DB_LOCK_WAIT  if lock_wait() must be invoked */
dberr_t lock_table(dict_table_t *table, dict_table_t *const*fktable,
                   lock_mode mode, que_thr_t *thr)
  MY_ATTRIBUTE((warn_unused_result));
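
/* A minimal call-pattern sketch (illustrative; the call site and error
handling are assumed): a DB_LOCK_WAIT result is followed up with
lock_wait(), which is declared further below.

     dberr_t err= lock_table(table, nullptr, LOCK_IS, thr);
     if (err == DB_LOCK_WAIT)
       err= lock_wait(thr);
*/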

/** Create a table lock object for a resurrected transaction.
@param table    table to be X-locked
@param trx      transaction
@param mode     LOCK_X or LOCK_IX */
void lock_table_resurrect(dict_table_t *table, trx_t *trx, lock_mode mode);

/** Sets a lock on a table based on the given mode.
@param table	table to lock
@param trx	transaction
@param mode	LOCK_X or LOCK_S
@param no_wait  whether to skip handling DB_LOCK_WAIT
@return error code */
dberr_t lock_table_for_trx(dict_table_t *table, trx_t *trx, lock_mode mode,
                           bool no_wait= false)
	MY_ATTRIBUTE((nonnull, warn_unused_result));

/** Exclusively lock the data dictionary tables.
@param trx  dictionary transaction
@return error code
@retval DB_SUCCESS on success */
dberr_t lock_sys_tables(trx_t *trx);

/*************************************************************//**
Removes a granted record lock of a transaction from the queue and grants
locks to other transactions waiting in the queue if they are now entitled
to a lock. */
void
lock_rec_unlock(
/*============*/
	trx_t*			trx,	/*!< in/out: transaction that has
					set a record lock */
	const page_id_t		id,	/*!< in: page containing rec */
	const rec_t*		rec,	/*!< in: record */
	lock_mode		lock_mode);/*!< in: LOCK_S or LOCK_X */

/** Release the explicit locks of a committing transaction,
and release possible other transactions waiting because of these locks. */
void lock_release(trx_t* trx);

/** Release the explicit locks of a committing transaction while
dict_sys.latch is exclusively locked,
and release possible other transactions waiting because of these locks. */
void lock_release_on_drop(trx_t *trx);

/** Release non-exclusive locks on XA PREPARE,
and release possible other transactions waiting because of these locks. */
void lock_release_on_prepare(trx_t *trx);

/** Release locks on a table whose creation is being rolled back */
ATTRIBUTE_COLD void lock_release_on_rollback(trx_t *trx, dict_table_t *table);

/**********************************************************************//**
Looks for a set bit in a record lock bitmap.
@return bit index == heap number of the record, or ULINT_UNDEFINED if
none found */
ulint
lock_rec_find_set_bit(
/*==================*/
	const lock_t*	lock);	/*!< in: record lock with at least one
				bit set */

/*********************************************************************//**
Checks if a lock request lock1 has to wait for request lock2.
@return whether lock1 has to wait for lock2 to be removed */
bool
lock_has_to_wait(
/*=============*/
	const lock_t*	lock1,	/*!< in: waiting lock */
	const lock_t*	lock2);	/*!< in: another lock; NOTE that it is
				assumed that this has a lock bit set
				on the same record as in lock1 if the
				locks are record locks */
/*********************************************************************//**
Reports that a transaction id is not sensible, i.e., in the future. */
ATTRIBUTE_COLD
void
lock_report_trx_id_insanity(
/*========================*/
	trx_id_t	trx_id,		/*!< in: trx id */
	const rec_t*	rec,		/*!< in: user record */
	dict_index_t*	index,		/*!< in: index */
	const rec_offs*	offsets,	/*!< in: rec_get_offsets(rec, index) */
	trx_id_t	max_trx_id);	/*!< in: trx_sys.get_max_trx_id() */
/*********************************************************************//**
Prints info of locks for all transactions.
@return FALSE if not able to acquire lock_sys.latch (and display info) */
ibool
lock_print_info_summary(
/*====================*/
	FILE*	file,	/*!< in: file where to print */
	ibool   nowait)	/*!< in: whether to wait for lock_sys.latch */
	MY_ATTRIBUTE((warn_unused_result));

/** Prints transaction lock wait and MVCC state.
@param[in,out]	file	file where to print
@param[in]	trx	transaction
@param[in]	now	current my_hrtime_coarse() */
void lock_trx_print_wait_and_mvcc_state(FILE *file, const trx_t *trx,
                                        my_hrtime_t now);

/*********************************************************************//**
Prints info of locks for each transaction. This function will release
lock_sys.latch, which the caller must be holding in exclusive mode. */
void
lock_print_info_all_transactions(
/*=============================*/
	FILE*	file);	/*!< in: file where to print */

/*********************************************************************//**
Return the number of table locks for a transaction.
The caller must be holding lock_sys.latch. */
ulint
lock_number_of_tables_locked(
/*=========================*/
	const trx_lock_t*	trx_lock)	/*!< in: transaction locks */
	MY_ATTRIBUTE((warn_unused_result));

/** Check if there are any locks on a table.
@return true if table has either table or record locks. */
bool lock_table_has_locks(dict_table_t *table);

/** Wait for a lock to be released.
@retval DB_DEADLOCK if this transaction was chosen as the deadlock victim
@retval DB_INTERRUPTED if the execution was interrupted by the user
@retval DB_LOCK_WAIT_TIMEOUT if the lock wait timed out
@retval DB_SUCCESS if the lock was granted */
dberr_t lock_wait(que_thr_t *thr);
/*********************************************************************//**
Unlocks AUTO_INC type locks that were possibly reserved by a trx. This
function should be called at the end of an SQL statement, by the
connection thread that owns the transaction (trx->mysql_thd). */
void
lock_unlock_table_autoinc(
/*======================*/
	trx_t*	trx);			/*!< in/out: transaction */

/** Handle a pending lock wait (DB_LOCK_WAIT) in a semi-consistent read
while holding a clustered index leaf page latch.
@param trx           transaction that is or was waiting for a lock
@retval DB_SUCCESS   if the lock was granted
@retval DB_DEADLOCK  if the transaction must be aborted due to a deadlock
@retval DB_LOCK_WAIT if a lock wait would be necessary; the pending
                     lock request was released */
dberr_t lock_trx_handle_wait(trx_t *trx);

/*********************************************************************//**
Checks that a transaction id is sensible, i.e., not in the future.
@return true if ok */
bool
lock_check_trx_id_sanity(
/*=====================*/
	trx_id_t	trx_id,		/*!< in: trx id */
	const rec_t*	rec,		/*!< in: user record */
	dict_index_t*	index,		/*!< in: index */
	const rec_offs*	offsets);	/*!< in: rec_get_offsets(rec, index) */
#ifdef UNIV_DEBUG
/*******************************************************************//**
Check if the transaction holds any locks on the sys tables
or its records.
@return the strongest lock found on any sys table or 0 for none */
const lock_t*
lock_trx_has_sys_table_locks(
/*=========================*/
	const trx_t*	trx)	/*!< in: transaction to check */
	MY_ATTRIBUTE((nonnull, warn_unused_result));

/** Check if the transaction holds an explicit exclusive lock on a record.
@param[in]	trx	transaction
@param[in]	table	table
@param[in]	id	leaf page identifier
@param[in]	heap_no	heap number identifying the record
@return whether an explicit X-lock is held */
bool lock_trx_has_expl_x_lock(const trx_t &trx, const dict_table_t &table,
                              page_id_t id, ulint heap_no);
#endif /* UNIV_DEBUG */

/** Lock operation struct */
struct lock_op_t{
	dict_table_t*	table;	/*!< table to be locked */
	lock_mode	mode;	/*!< lock mode */
};

/** The lock system struct */
class lock_sys_t
{
  friend struct LockGuard;
  friend struct LockMultiGuard;
  friend struct TMLockGuard;
  friend struct TMLockMutexGuard;
  friend struct TMLockTrxGuard;

  /** Hash table latch */
  struct hash_latch
#ifdef SUX_LOCK_GENERIC
  : private rw_lock
  {
    /** Wait for an exclusive lock */
    void wait();
    /** Try to acquire a lock */
    bool try_acquire() { return write_trylock(); }
    /** Acquire a lock */
    void acquire() { if (!try_acquire()) wait(); }
    /** Release a lock */
    void release();
    /** @return whether any lock is being held or waited for by any thread */
    bool is_locked_or_waiting() const
    { return rw_lock::is_locked_or_waiting(); }
    /** @return whether this latch is possibly held by any thread */
    bool is_locked() const { return rw_lock::is_locked(); }
#else
  {
  private:
    srw_spin_lock_low lock;
  public:
    /** Try to acquire a lock */
    bool try_acquire() { return lock.wr_lock_try(); }
    /** Acquire a lock */
    void acquire() { lock.wr_lock(); }
    /** Release a lock */
    void release() { lock.wr_unlock(); }
    /** @return whether any lock may be held by any thread */
    bool is_locked_or_waiting() const noexcept
    { return lock.is_locked_or_waiting(); }
    /** @return whether this latch is possibly held by any thread */
    bool is_locked() const noexcept { return lock.is_locked(); }
#endif
  };

public:
  struct hash_table
  {
    /** Number of consecutive array[] elements occupied by a hash_latch */
    static constexpr size_t LATCH= sizeof(void*) >= sizeof(hash_latch) ? 1 : 2;
    static_assert(sizeof(hash_latch) <= LATCH * sizeof(void*), "allocation");

    /** Number of array[] elements per hash_latch.
    ELEMENTS_PER_LATCH + LATCH must be a power of 2. */
    static constexpr size_t ELEMENTS_PER_LATCH= (64 / sizeof(void*)) - LATCH;
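    /** Number of unused array[] elements per latch, padding each
    latch's group of cells to a full cache line when
    CPU_LEVEL1_DCACHE_LINESIZE exceeds 64 bytes */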
    static constexpr size_t EMPTY_SLOTS_PER_LATCH=
      ((CPU_LEVEL1_DCACHE_LINESIZE / 64) - 1) * (64 / sizeof(void*));

    /** number of payload elements in array[]. Protected by lock_sys.latch. */
    ulint n_cells;
    /** the hash table, with pad(n_cells) elements, aligned to L1 cache size;
    in any hash chain, lock_t::is_waiting() entries must not precede
    granted locks */
    hash_cell_t *array;

    /** Create the hash table.
    @param n  the lower bound of n_cells */
    void create(ulint n);

    /** Resize the hash table.
    @param n  the lower bound of n_cells */
    void resize(ulint n);

    /** Free the hash table. */
    void free() { aligned_free(array); array= nullptr; }

    /** @return the index of an array element */
    inline ulint calc_hash(ulint fold) const;

    /** @return raw array index converted to padded index */
    static ulint pad(ulint h)
    {
      ulint latches= LATCH * (h / ELEMENTS_PER_LATCH);
      ulint empty_slots= (h / ELEMENTS_PER_LATCH) * EMPTY_SLOTS_PER_LATCH;
      return LATCH + latches + empty_slots + h;
    }
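
    /* A worked example (a sketch, assuming 64-bit pointers and a
    64-byte cache line, so LATCH == 1, ELEMENTS_PER_LATCH == 7 and
    EMPTY_SLOTS_PER_LATCH == 0): each cache line holds one latch slot
    followed by 7 payload cells, and pad(7) == 1 + 1 + 0 + 7 == 9,
    skipping the latch slot that starts the second cache line. */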

    /** Get a latch. */
    static hash_latch *latch(hash_cell_t *cell)
    {
      void *l= ut_align_down(cell, sizeof *cell *
                             (ELEMENTS_PER_LATCH + LATCH));
      return static_cast<hash_latch*>(l);
    }
    /** Get a hash table cell. */
    inline hash_cell_t *cell_get(ulint fold) const;

#ifdef UNIV_DEBUG
    void assert_locked(const page_id_t id) const;
#else
    void assert_locked(const page_id_t) const {}
#endif

  private:
    /** @return the hash value before any ELEMENTS_PER_LATCH padding */
    static ulint hash(ulint fold, ulint n) { return ut_hash_ulint(fold, n); }

    /** @return the index of an array element */
    static ulint calc_hash(ulint fold, ulint n_cells)
    {
      return pad(hash(fold, n_cells));
    }
  };

private:
  bool m_initialised;

  /** latch protecting the locks */
  alignas(CPU_LEVEL1_DCACHE_LINESIZE) srw_spin_lock latch;
#ifdef UNIV_DEBUG
  /** The owner of exclusive latch (0 if none); protected by latch */
  std::atomic<pthread_t> writer{0};
  /** Number of shared latches */
  std::atomic<ulint> readers{0};
#endif
#ifdef SUX_LOCK_GENERIC
protected:
  /** mutex for hash_latch::wait() */
  pthread_mutex_t hash_mutex;
  /** condition variable for hash_latch::wait() */
  pthread_cond_t hash_cond;
#endif
public:
  /** record locks */
  hash_table rec_hash;
  /** predicate locks for SPATIAL INDEX */
  hash_table prdt_hash;
  /** page locks for SPATIAL INDEX */
  hash_table prdt_page_hash;

  /** mutex covering lock waits; @see trx_lock_t::wait_lock */
  alignas(CPU_LEVEL1_DCACHE_LINESIZE) mysql_mutex_t wait_mutex;
private:
  /** The increment of wait_count for a wait. Anything smaller is a
  pending wait count. */
  static constexpr uint64_t WAIT_COUNT_STEP= 1U << 19;
  /** pending and cumulative number of lock waits; protected by wait_mutex */
  uint64_t wait_count;
  /** Cumulative wait time; protected by wait_mutex */
  uint32_t wait_time;
  /** Longest wait time; protected by wait_mutex */
  uint32_t wait_time_max;
public:
  /** number of deadlocks detected; protected by wait_mutex */
  ulint deadlocks;
  /** number of lock wait timeouts; protected by wait_mutex */
  ulint timeouts;
  /**
    Constructor.

    Some members may require late initialisation, thus we just mark the
    object as uninitialised. Real initialisation happens in create().
  */
  lock_sys_t(): m_initialised(false) {}


  bool is_initialised() const { return m_initialised; }

#ifdef UNIV_PFS_RWLOCK
  /** Acquire exclusive lock_sys.latch */
  ATTRIBUTE_NOINLINE
  void wr_lock(const char *file, unsigned line);
  /** Release exclusive lock_sys.latch */
  ATTRIBUTE_NOINLINE void wr_unlock();
  /** Acquire shared lock_sys.latch */
  ATTRIBUTE_NOINLINE void rd_lock(const char *file, unsigned line);
  /** Release shared lock_sys.latch */
  ATTRIBUTE_NOINLINE void rd_unlock();
#else
  /** Acquire exclusive lock_sys.latch */
  void wr_lock()
  {
    mysql_mutex_assert_not_owner(&wait_mutex);
    ut_ad(!is_writer());
    latch.wr_lock();
    ut_ad(!writer.exchange(pthread_self(),
                           std::memory_order_relaxed));
  }
  /** Release exclusive lock_sys.latch */
  void wr_unlock()
  {
    ut_ad(writer.exchange(0, std::memory_order_relaxed) ==
          pthread_self());
    latch.wr_unlock();
  }
  /** Acquire shared lock_sys.latch */
  void rd_lock()
  {
    mysql_mutex_assert_not_owner(&wait_mutex);
    ut_ad(!is_writer());
    latch.rd_lock();
    ut_ad(!writer.load(std::memory_order_relaxed));
    ut_d(readers.fetch_add(1, std::memory_order_relaxed));
  }
  /** Release shared lock_sys.latch */
  void rd_unlock()
  {
    ut_ad(!is_writer());
    ut_ad(readers.fetch_sub(1, std::memory_order_relaxed));
    latch.rd_unlock();
  }
#endif
  /** Try to acquire exclusive lock_sys.latch
  @return whether the latch was acquired */
  bool wr_lock_try()
  {
    ut_ad(!is_writer());
    if (!latch.wr_lock_try()) return false;
    ut_ad(!writer.exchange(pthread_self(),
                           std::memory_order_relaxed));
    return true;
  }
  /** Try to acquire shared lock_sys.latch
  @return whether the latch was acquired */
  bool rd_lock_try()
  {
    ut_ad(!is_writer());
    if (!latch.rd_lock_try()) return false;
    ut_ad(!writer.load(std::memory_order_relaxed));
    ut_d(readers.fetch_add(1, std::memory_order_relaxed));
    return true;
  }

  /** Assert that wr_lock() has been invoked by this thread */
  void assert_locked() const { ut_ad(is_writer()); }
  /** Assert that wr_lock() has not been invoked by this thread */
  void assert_unlocked() const { ut_ad(!is_writer()); }
#ifdef UNIV_DEBUG
  /** @return whether the current thread is the lock_sys.latch writer */
  bool is_writer() const
  {
# ifdef SUX_LOCK_GENERIC
    return writer.load(std::memory_order_relaxed) == pthread_self();
# else
    return writer.load(std::memory_order_relaxed) == pthread_self() ||
      (xtest() && !latch.is_locked_or_waiting());
# endif
  }
  /** Assert that a lock shard is exclusively latched (by some thread) */
  void assert_locked(const lock_t &lock) const;
  /** Assert that a table lock shard is exclusively latched by this thread */
  void assert_locked(const dict_table_t &table) const;
  /** Assert that a hash table cell is exclusively latched (by some thread) */
  void assert_locked(const hash_cell_t &cell) const;
#else
  void assert_locked(const lock_t &) const {}
  void assert_locked(const dict_table_t &) const {}
  void assert_locked(const hash_cell_t &) const {}
#endif

  /**
    Creates the lock system at database start.

    @param[in] n_cells number of slots in lock hash table
  */
  void create(ulint n_cells);


  /**
    Resize the lock hash table.

    @param[in] n_cells number of slots in lock hash table
  */
  void resize(ulint n_cells);


  /** Closes the lock system at database shutdown. */
  void close();


  /** Check for deadlocks while holding only lock_sys.wait_mutex. */
  void deadlock_check();

  /** Cancel a waiting lock request.
  @tparam check_victim  whether to check for DB_DEADLOCK
  @param trx            active transaction
  @param lock           waiting lock request
  @retval DB_SUCCESS    if no lock existed
  @retval DB_DEADLOCK   if trx->lock.was_chosen_as_deadlock_victim was set
  @retval DB_LOCK_WAIT  if the lock was canceled */
  template<bool check_victim>
  static dberr_t cancel(trx_t *trx, lock_t *lock);
  /** Cancel a waiting lock request (if any) when killing a transaction */
  static void cancel(trx_t *trx);

  /** Note that a record lock wait started */
  inline void wait_start();

  /** Note that a record lock wait resumed */
  inline void wait_resume(THD *thd, my_hrtime_t start, my_hrtime_t now);

  /** @return pending number of lock waits */
  ulint get_wait_pending() const
  {
    return static_cast<ulint>(wait_count & (WAIT_COUNT_STEP - 1));
  }
  /** @return cumulative number of lock waits */
  ulint get_wait_cumulative() const
  { return static_cast<ulint>(wait_count / WAIT_COUNT_STEP); }
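
  /* A worked example of the wait_count encoding: the value
  3 * WAIT_COUNT_STEP + 2 encodes 3 completed waits in the upper bits
  and 2 pending waits in the low 19 bits, so get_wait_pending() == 2
  and get_wait_cumulative() == 3. */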
  /** Cumulative wait time; protected by wait_mutex */
  ulint get_wait_time_cumulative() const { return wait_time; }
  /** Longest wait time; protected by wait_mutex */
  ulint get_wait_time_max() const { return wait_time_max; }

  /** Get the lock hash table for a mode */
  hash_table &hash_get(ulint mode)
  {
    if (UNIV_LIKELY(!(mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE))))
      return rec_hash;
    return (mode & LOCK_PREDICATE) ? prdt_hash : prdt_page_hash;
  }
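
  /* For example, hash_get(LOCK_X | LOCK_GAP) selects rec_hash, while
  hash_get(LOCK_PREDICATE | LOCK_X) selects prdt_hash. */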

  /** Get the lock hash table for a predicate lock mode */
  hash_table &prdt_hash_get(bool page)
  { return page ? prdt_page_hash : prdt_hash; }

  /** Get the first lock on a page.
  @param cell        hash table cell
  @param id          page number
  @return first lock
  @retval nullptr if none exists */
  static inline lock_t *get_first(const hash_cell_t &cell, page_id_t id);

  /** Get the first explicit lock request on a record.
  @param cell     first lock hash table cell
  @param id       page identifier
  @param heap_no  record identifier in page
  @return first lock
  @retval nullptr if none exists */
  static inline lock_t *get_first(const hash_cell_t &cell, page_id_t id,
                                  ulint heap_no);

  /** Remove locks on a discarded SPATIAL INDEX page.
  @param id   page to be discarded
  @param all  whether to also discard from lock_sys.prdt_hash */
  void prdt_page_free_from_discard(const page_id_t id, bool all= false);

  /** Cancel possible lock waiting for a transaction */
  static void cancel_lock_wait_for_trx(trx_t *trx);
};

/** The lock system */
extern lock_sys_t lock_sys;

/** @return the index of an array element */
inline ulint lock_sys_t::hash_table::calc_hash(ulint fold) const
{
  ut_ad(lock_sys.is_writer() || lock_sys.readers);
  return calc_hash(fold, n_cells);
}

/** Get a hash table cell. */
inline hash_cell_t *lock_sys_t::hash_table::cell_get(ulint fold) const
{
  ut_ad(lock_sys.is_writer() || lock_sys.readers);
  return &array[calc_hash(fold)];
}

/** Get the first lock on a page.
@param cell        hash table cell
@param id          page number
@return first lock
@retval nullptr if none exists */
inline lock_t *lock_sys_t::get_first(const hash_cell_t &cell, page_id_t id)
{
  lock_sys.assert_locked(cell);
  for (auto lock= static_cast<lock_t*>(cell.node); lock; lock= lock->hash)
  {
    ut_ad(!lock->is_table());
    if (lock->un_member.rec_lock.page_id == id)
      return lock;
  }
  return nullptr;
}

/** lock_sys.latch exclusive guard */
struct LockMutexGuard
{
  LockMutexGuard(SRW_LOCK_ARGS(const char *file, unsigned line))
  { lock_sys.wr_lock(SRW_LOCK_ARGS(file, line)); }
  ~LockMutexGuard() { lock_sys.wr_unlock(); }
};
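
/* A minimal usage sketch (illustrative): the guard holds the exclusive
lock_sys.latch for the duration of the enclosing scope.

     { LockMutexGuard g{SRW_LOCK_CALL}; ... all lock shards latched ... }
*/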

/** lock_sys latch guard for 1 page_id_t */
struct LockGuard
{
  LockGuard(lock_sys_t::hash_table &hash, const page_id_t id);
  ~LockGuard()
  {
    lock_sys_t::hash_table::latch(cell_)->release();
    /* Must be last, to avoid a race with lock_sys_t::hash_table::resize() */
    lock_sys.rd_unlock();
  }
  /** @return the hash array cell */
  hash_cell_t &cell() const { return *cell_; }
private:
  /** The hash array cell */
  hash_cell_t *cell_;
};
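
/* A minimal usage sketch (illustrative; page_id is assumed): latch the
rec_hash cell of one page and look up its first record lock.

     LockGuard g{lock_sys.rec_hash, page_id};
     if (lock_t *lock= lock_sys_t::get_first(g.cell(), page_id))
       ...;
*/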

/** lock_sys latch guard for 2 page_id_t */
struct LockMultiGuard
{
  LockMultiGuard(lock_sys_t::hash_table &hash,
                 const page_id_t id1, const page_id_t id2);
  ~LockMultiGuard();

  /** @return the first hash array cell */
  hash_cell_t &cell1() const { return *cell1_; }
  /** @return the second hash array cell */
  hash_cell_t &cell2() const { return *cell2_; }
private:
  /** The first hash array cell */
  hash_cell_t *cell1_;
  /** The second hash array cell */
  hash_cell_t *cell2_;
};

/** lock_sys.latch exclusive guard using transactional memory */
struct TMLockMutexGuard
{
  TRANSACTIONAL_INLINE
  TMLockMutexGuard(SRW_LOCK_ARGS(const char *file, unsigned line))
  {
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
    if (xbegin())
    {
      if (was_elided())
        return;
      xabort();
    }
#endif
    lock_sys.wr_lock(SRW_LOCK_ARGS(file, line));
  }
  TRANSACTIONAL_INLINE
  ~TMLockMutexGuard()
  {
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
    if (was_elided()) xend(); else
#endif
    lock_sys.wr_unlock();
  }

#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
  bool was_elided() const noexcept
  { return !lock_sys.latch.is_locked_or_waiting(); }
#else
  bool was_elided() const noexcept { return false; }
#endif
};

/** lock_sys latch guard for 1 page_id_t, using transactional memory */
struct TMLockGuard
{
  TRANSACTIONAL_TARGET
  TMLockGuard(lock_sys_t::hash_table &hash, const page_id_t id);
  TRANSACTIONAL_INLINE ~TMLockGuard()
  {
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
    if (elided)
    {
      xend();
      return;
    }
#endif
    lock_sys_t::hash_table::latch(cell_)->release();
    /* Must be last, to avoid a race with lock_sys_t::hash_table::resize() */
    lock_sys.rd_unlock();
  }
  /** @return the hash array cell */
  hash_cell_t &cell() const { return *cell_; }
private:
  /** The hash array cell */
  hash_cell_t *cell_;
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
  /** whether the latches were elided */
  bool elided;
#endif
};

/** guard for shared lock_sys.latch and trx_t::mutex using
transactional memory */
struct TMLockTrxGuard
{
  trx_t &trx;

  TRANSACTIONAL_INLINE
#ifndef UNIV_PFS_RWLOCK
  TMLockTrxGuard(trx_t &trx) : trx(trx)
# define TMLockTrxArgs(trx) trx
#else
  TMLockTrxGuard(const char *file, unsigned line, trx_t &trx) : trx(trx)
# define TMLockTrxArgs(trx) SRW_LOCK_CALL, trx
#endif
  {
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
    if (xbegin())
    {
      if (!lock_sys.latch.is_write_locked() && was_elided())
        return;
      xabort();
    }
#endif
    lock_sys.rd_lock(SRW_LOCK_ARGS(file, line));
    trx.mutex_lock();
  }
  TRANSACTIONAL_INLINE
  ~TMLockTrxGuard()
  {
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
    if (was_elided())
    {
      xend();
      return;
    }
#endif
    lock_sys.rd_unlock();
    trx.mutex_unlock();
  }
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
  bool was_elided() const noexcept { return !trx.mutex_is_locked(); }
#else
  bool was_elided() const noexcept { return false; }
#endif
};
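
/* A minimal usage sketch (illustrative): TMLockTrxArgs() supplies the
performance-schema file/line arguments only when they are compiled in.

     { TMLockTrxGuard g{TMLockTrxArgs(*trx)}; ... trx lock state ... }
*/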

/** guard for trx_t::mutex using transactional memory */
struct TMTrxGuard
{
  trx_t &trx;

  TRANSACTIONAL_INLINE TMTrxGuard(trx_t &trx) : trx(trx)
  {
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
    if (xbegin())
    {
      if (was_elided())
        return;
      xabort();
    }
#endif
    trx.mutex_lock();
  }
  TRANSACTIONAL_INLINE ~TMTrxGuard()
  {
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
    if (was_elided())
    {
      xend();
      return;
    }
#endif
    trx.mutex_unlock();
  }
#if !defined NO_ELISION && !defined SUX_LOCK_GENERIC
  bool was_elided() const noexcept { return !trx.mutex_is_locked(); }
#else
  bool was_elided() const noexcept { return false; }
#endif
};

/*********************************************************************//**
Creates a new record lock and inserts it into the lock queue. Does NOT check
for deadlocks or lock compatibility!
@return created lock */
UNIV_INLINE
lock_t*
lock_rec_create(
/*============*/
	lock_t*			c_lock,	/*!< conflicting lock */
	unsigned		type_mode,/*!< in: lock mode and wait flag */
	const buf_block_t*	block,	/*!< in: buffer block containing
					the record */
	ulint			heap_no,/*!< in: heap number of the record */
	dict_index_t*		index,	/*!< in: index of record */
	trx_t*			trx,	/*!< in,out: transaction */
	bool			caller_owns_trx_mutex);
					/*!< in: true if caller owns
					trx mutex */

/** Remove a record lock request, waiting or granted, on a discarded page
@param lock_hash  hash table
@param in_lock  lock object */
void lock_rec_discard(lock_sys_t::hash_table &lock_hash, lock_t *in_lock);

/** Create a new record lock and insert it into the lock queue,
without checking for deadlocks or conflicts.
@param[in]	c_lock		conflicting lock, or NULL
@param[in]	type_mode	lock mode and wait flag
@param[in]	page_id		index page number
@param[in]	page		R-tree index page, or NULL
@param[in]	heap_no		record heap number in the index page
@param[in]	index		the index tree
@param[in,out]	trx		transaction
@param[in]	holds_trx_mutex	whether the caller holds trx->mutex
@return created lock */
lock_t*
lock_rec_create_low(
	lock_t*		c_lock,
	unsigned	type_mode,
	const page_id_t	page_id,
	const page_t*	page,
	ulint		heap_no,
	dict_index_t*	index,
	trx_t*		trx,
	bool		holds_trx_mutex);

/** Enqueue a waiting request for a lock which cannot be granted immediately.
Check for deadlocks.
@param[in]	c_lock		conflicting lock
@param[in]	type_mode	the requested lock mode (LOCK_S or LOCK_X)
				possibly ORed with LOCK_GAP or
				LOCK_REC_NOT_GAP, ORed with
				LOCK_INSERT_INTENTION if this
				waiting lock request is set
				when performing an insert of
				an index record
@param[in]	id		page identifier
@param[in]	page		leaf page in the index
@param[in]	heap_no		record heap number in the block
@param[in]	index		index tree
@param[in,out]	thr		query thread
@param[in]	prdt		minimum bounding box (spatial index)
@retval	DB_LOCK_WAIT		if the waiting lock was enqueued
@retval	DB_DEADLOCK		if this transaction was chosen as the victim */
dberr_t
lock_rec_enqueue_waiting(
	lock_t*			c_lock,
	unsigned		type_mode,
	const page_id_t		id,
	const page_t*		page,
	ulint			heap_no,
	dict_index_t*		index,
	que_thr_t*		thr,
	lock_prdt_t*		prdt);
/*************************************************************//**
Moves the explicit locks on user records to another page if a record
list start is moved to another page. */
void
lock_rtr_move_rec_list(
/*===================*/
	const buf_block_t*	new_block,	/*!< in: index page to
						move to */
	const buf_block_t*	block,		/*!< in: index page */
	rtr_rec_move_t*		rec_move,	/*!< in: recording records
						moved */
	ulint			num_move);	/*!< in: num of rec to move */

#include "lock0lock.inl"

#endif