Drizzled Public API Documentation

row0vers.cc
/*****************************************************************************

Copyright (C) 1997, 2009, Innobase Oy. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
St, Fifth Floor, Boston, MA 02110-1301 USA

*****************************************************************************/
/**************************************************//**
@file row/row0vers.cc
Row versions

Created 2/6/1997 Heikki Tuuri
*******************************************************/

#include "row0vers.h"

#ifdef UNIV_NONINL
#include "row0vers.ic"
#endif

#include "dict0dict.h"
#include "dict0boot.h"
#include "btr0btr.h"
#include "mach0data.h"
#include "trx0rseg.h"
#include "trx0trx.h"
#include "trx0roll.h"
#include "trx0undo.h"
#include "trx0purge.h"
#include "trx0rec.h"
#include "que0que.h"
#include "row0row.h"
#include "row0upd.h"
#include "rem0cmp.h"
#include "read0read.h"
#include "lock0lock.h"
/*****************************************************************//**
Finds out if an active transaction has modified or inserted a secondary
index record. NOTE: the kernel mutex is temporarily released in this
function!
@return	NULL if committed, else the active transaction */
UNIV_INTERN
trx_t*
row_vers_impl_x_locked_off_kernel(
/*==============================*/
	const rec_t*	rec,	/*!< in: record in a secondary index */
	dict_index_t*	index,	/*!< in: the secondary index */
	const ulint*	offsets)/*!< in: rec_get_offsets(rec, index) */
{
	dict_index_t*	clust_index;
	rec_t*		clust_rec;
	ulint*		clust_offsets;
	rec_t*		version;
	trx_id_t	trx_id;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	dtuple_t*	row;
	dtuple_t*	entry	= NULL; /* assignment to eliminate compiler
					warning */
	trx_t*		trx;
	ulint		rec_del;
#ifdef UNIV_DEBUG
	ulint		err;
#endif /* UNIV_DEBUG */
	mtr_t		mtr;
	ulint		comp;
	ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mutex_exit(&kernel_mutex);

	mtr_start(&mtr);

	/* Search for the clustered index record: this is a time-consuming
	operation: therefore we release the kernel mutex; also, the release
	is required by the latching order convention. The latch on the
	clustered index locks the top of the stack of versions. We also
	reserve purge_latch to lock the bottom of the version stack. */

	clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
				      &clust_index, &mtr);
	if (!clust_rec) {
		/* In a rare case it is possible that no clust rec is found
		for a secondary index record: if in row0umod.c
		row_undo_mod_remove_clust_low() we have already removed the
		clust rec, while purge is still cleaning and removing
		secondary index records associated with earlier versions of
		the clustered index record. In that case there cannot be
		any implicit lock on the secondary index record, because
		an active transaction which has modified the secondary index
		record has also modified the clustered index record. And in
		a rollback we always undo the modifications to secondary index
		records before the clustered index record. */

		mutex_enter(&kernel_mutex);
		mtr_commit(&mtr);

		return(NULL);
	}

	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(clust_rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);
	trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);

	mtr_s_lock(&(purge_sys->latch), &mtr);

	mutex_enter(&kernel_mutex);

	trx = NULL;
	if (!trx_is_active(trx_id)) {
		/* The transaction that modified or inserted clust_rec is no
		longer active: no implicit lock on rec */
		goto exit_func;
	}

	if (!lock_check_trx_id_sanity(trx_id, clust_rec, clust_index,
				      clust_offsets, TRUE)) {
		/* Corruption noticed: try to avoid a crash by returning */
		goto exit_func;
	}

	comp = page_rec_is_comp(rec);
	ut_ad(index->table == clust_index->table);
	ut_ad(!!comp == dict_table_is_comp(index->table));
	ut_ad(!comp == !page_rec_is_comp(clust_rec));

	/* We look up if some earlier version, which was modified by the trx_id
	transaction, of the clustered index record would require rec to be in
	a different state (delete marked or unmarked, or have different field
	values, or not existing). If there is such a version, then rec was
	modified by the trx_id transaction, and it has an implicit x-lock on
	rec. Note that if clust_rec itself would require rec to be in a
	different state, then the trx_id transaction has not yet had time to
	modify rec, and does not necessarily have an implicit x-lock on rec. */

	rec_del = rec_get_deleted_flag(rec, comp);
	trx = NULL;

	version = clust_rec;

	for (;;) {
		rec_t*		prev_version;
		ulint		vers_del;
		row_ext_t*	ext;
		trx_id_t	prev_trx_id;

		mutex_exit(&kernel_mutex);

		/* While we retrieve an earlier version of clust_rec, we
		release the kernel mutex, because it may take time to access
		the disk. After the release, we have to check if the trx_id
		transaction is still active. We keep the semaphore in mtr on
		the clust_rec page, so that no other transaction can update
		it and get an implicit x-lock on rec. */

		heap2 = heap;
		heap = mem_heap_create(1024);
#ifdef UNIV_DEBUG
		err =
#endif /* UNIV_DEBUG */
		trx_undo_prev_version_build(clust_rec, &mtr, version,
					    clust_index, clust_offsets,
					    heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (prev_version == NULL) {
			mutex_enter(&kernel_mutex);

			if (!trx_is_active(trx_id)) {
				/* Transaction no longer active: no
				implicit x-lock */

				break;
			}

			/* If the transaction is still active,
			clust_rec must be a fresh insert, because no
			previous version was found. */
			ut_ad(err == DB_SUCCESS);

			/* It was a freshly inserted version: there is an
			implicit x-lock on rec */

			trx = trx_get_on_id(trx_id);

			break;
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		vers_del = rec_get_deleted_flag(prev_version, comp);
		prev_trx_id = row_get_rec_trx_id(prev_version, clust_index,
						 clust_offsets);

		/* If the trx_id and prev_trx_id are different and if
		the prev_version is marked deleted then the
		prev_trx_id must have already committed for the trx_id
		to be able to modify the row. Therefore, prev_trx_id
		cannot hold any implicit lock. */
		if (vers_del && trx_id != prev_trx_id) {

			mutex_enter(&kernel_mutex);
			break;
		}

		/* The stack of versions is locked by mtr. Thus, it
		is safe to fetch the prefixes for externally stored
		columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index, prev_version,
				clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);
		/* entry may be NULL if a record was inserted in place
		of a deleted record, and the BLOB pointers of the new
		record were not initialized yet. But in that case,
		prev_version should be NULL. */
		ut_a(entry);

		mutex_enter(&kernel_mutex);

		if (!trx_is_active(trx_id)) {
			/* Transaction no longer active: no implicit x-lock */

			break;
		}

		/* If we get here, we know that the trx_id transaction is
		still active and it has modified prev_version. Let us check
		if prev_version would require rec to be in a different
		state. */

		/* The previous version of clust_rec must be
		accessible, because the transaction is still active
		and clust_rec was not a fresh insert. */
		ut_ad(err == DB_SUCCESS);

		/* We check if entry and rec are identified in the alphabetical
		ordering */
		if (0 == cmp_dtuple_rec(entry, rec, offsets)) {
			/* The delete marks of rec and prev_version should be
			equal for rec to be in the state required by
			prev_version */

			if (rec_del != vers_del) {
				trx = trx_get_on_id(trx_id);

				break;
			}

			/* It is possible that the row was updated so that the
			secondary index record remained the same in
			alphabetical ordering, but the field values changed
			still. For example, 'abc' -> 'ABC'. Check also that. */

			dtuple_set_types_binary(entry,
						dtuple_get_n_fields(entry));
			if (0 != cmp_dtuple_rec(entry, rec, offsets)) {

				trx = trx_get_on_id(trx_id);

				break;
			}
		} else if (!rec_del) {
			/* The delete mark should be set in rec for it to be
			in the state required by prev_version */

			trx = trx_get_on_id(trx_id);

			break;
		}

		if (trx_id != prev_trx_id) {
			/* The versions modified by the trx_id transaction end
			to prev_version: no implicit x-lock */

			break;
		}

		version = prev_version;
	}/* for (;;) */

exit_func:
	mtr_commit(&mtr);
	mem_heap_free(heap);

	return(trx);
}
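
For context: this routine backs InnoDB's conversion of implicit record locks to explicit ones. A caller holds the kernel mutex, asks which active transaction (if any) implicitly x-locks a secondary index record, and then registers an explicit lock for that transaction. Below is a minimal caller sketch under those assumptions; the wrapper name is invented for illustration and is not part of row0vers.cc.

/* Illustrative caller sketch (not part of row0vers.cc): find the
transaction, if any, holding an implicit x-lock on a secondary
index record. row_vers_impl_x_locked_off_kernel() must be entered
with the kernel mutex held; note that it releases and re-acquires
the mutex internally. */
static trx_t*
find_implicit_x_lock_sketch(
	const rec_t*	rec,	/* in: secondary index record */
	dict_index_t*	index,	/* in: the secondary index */
	const ulint*	offsets)/* in: rec_get_offsets(rec, index) */
{
	trx_t*	impl_trx;

	ut_ad(!dict_index_is_clust(index));

	mutex_enter(&kernel_mutex);
	impl_trx = row_vers_impl_x_locked_off_kernel(rec, index, offsets);
	/* impl_trx != NULL means the record carries an implicit
	x-lock of that still-active transaction. */
	mutex_exit(&kernel_mutex);

	return(impl_trx);
}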

/*****************************************************************//**
Finds out if we must preserve a delete marked earlier version of a clustered
index record, because it is >= the purge view.
@return	TRUE if earlier version should be preserved */
UNIV_INTERN
ibool
row_vers_must_preserve_del_marked(
/*==============================*/
	trx_id_t	trx_id,	/*!< in: transaction id in the version */
	mtr_t*		mtr)	/*!< in: mtr holding the latch on the
				clustered index record; it will also
				hold the latch on purge_view */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mtr_s_lock(&(purge_sys->latch), mtr);

	if (trx_purge_update_undo_must_exist(trx_id)) {

		/* A purge operation is not yet allowed to remove this
		delete marked record */

		return(TRUE);
	}

	return(FALSE);
}
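
A small usage sketch of the predicate above; the wrapper is illustrative only. Note the contract: purge_sys->latch is left s-locked in mtr and is released only when the caller commits the mtr.

/* Illustrative only: TRUE if the delete-marked version carrying
trx_id may be physically removed now. */
static ibool
may_remove_del_marked_sketch(
	trx_id_t	trx_id,	/* in: trx id stored in the version */
	mtr_t*		mtr)	/* in: mtr latching the record page */
{
	if (row_vers_must_preserve_del_marked(trx_id, mtr)) {
		/* Some consistent read may still need this
		version: keep it. */
		return(FALSE);
	}

	return(TRUE);
}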

/*****************************************************************//**
Finds out if a version of the record, where the version >= the current
purge view, should have ientry as its secondary index entry. We check
if there is any not delete marked version of the record where the trx
id >= purge view, and the secondary index entry and ientry are identified in
the alphabetical ordering; exactly in this case we return TRUE.
@return	TRUE if earlier version should have */
UNIV_INTERN
ibool
row_vers_old_has_index_entry(
/*=========================*/
	ibool		also_curr,/*!< in: TRUE if also rec is included in the
				versions to search; otherwise only versions
				prior to it are searched */
	const rec_t*	rec,	/*!< in: record in the clustered index; the
				caller must have a latch on the page */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec; it
				will also hold the latch on purge_view */
	dict_index_t*	index,	/*!< in: the secondary index */
	const dtuple_t*	ientry)	/*!< in: the secondary index entry */
{
	const rec_t*	version;
	rec_t*		prev_version;
	dict_index_t*	clust_index;
	ulint*		clust_offsets;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	const dtuple_t*	row;
	const dtuple_t*	entry;
	ulint		err;
	ulint		comp;

	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
	mtr_s_lock(&(purge_sys->latch), mtr);

	clust_index = dict_table_get_first_index(index->table);

	comp = page_rec_is_comp(rec);
	ut_ad(!dict_table_is_comp(index->table) == !comp);
	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);

	if (also_curr && !rec_get_deleted_flag(rec, comp)) {
		row_ext_t*	ext;

		/* The stack of versions is locked by mtr.
		Thus, it is safe to fetch the prefixes for
		externally stored columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index,
				rec, clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);

		/* If entry == NULL, the record contains unset BLOB
		pointers. This must be a freshly inserted record. If
		this is called from
		row_purge_remove_sec_if_poss_low(), the thread will
		hold latches on the clustered index and the secondary
		index. Because the insert works in three steps:

		(1) insert the record to clustered index
		(2) store the BLOBs and update BLOB pointers
		(3) insert records to secondary indexes

		the purge thread can safely ignore freshly inserted
		records and delete the secondary index record. The
		thread that inserted the new record will be inserting
		the secondary index records. */

		/* NOTE that we cannot do the comparison as binary
		fields because the row is maybe being modified so that
		the clustered index record has already been updated to
		a different binary value in a char field, but the
		collation identifies the old and new value anyway! */
		if (entry && !dtuple_coll_cmp(ientry, entry)) {

			mem_heap_free(heap);

			return(TRUE);
		}
	}

	version = rec;

	for (;;) {
		heap2 = heap;
		heap = mem_heap_create(1024);
		err = trx_undo_prev_version_build(rec, mtr, version,
						  clust_index, clust_offsets,
						  heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (err != DB_SUCCESS || !prev_version) {
			/* Versions end here */

			mem_heap_free(heap);

			return(FALSE);
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		if (!rec_get_deleted_flag(prev_version, comp)) {
			row_ext_t*	ext;

			/* The stack of versions is locked by mtr.
			Thus, it is safe to fetch the prefixes for
			externally stored columns. */
			row = row_build(ROW_COPY_POINTERS, clust_index,
					prev_version, clust_offsets,
					NULL, &ext, heap);
			entry = row_build_index_entry(row, ext, index, heap);

			/* If entry == NULL, the record contains unset
			BLOB pointers. This must be a freshly
			inserted record that we can safely ignore.
			For the justification, see the comments after
			the previous row_build_index_entry() call. */

			/* NOTE that we cannot do the comparison as binary
			fields because maybe the secondary index record has
			already been updated to a different binary value in
			a char field, but the collation identifies the old
			and new value anyway! */

			if (entry && !dtuple_coll_cmp(ientry, entry)) {

				mem_heap_free(heap);

				return(TRUE);
			}
		}

		version = prev_version;
	}
}
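
To make the purge-side contract concrete, here is a hedged sketch of the decision this function supports (the comments above mention row_purge_remove_sec_if_poss_low() as the real caller); the wrapper name is invented:

/* Illustrative only: may purge remove the secondary index record
matching ientry? TRUE when no surviving version of the clustered
index record still needs the entry. */
static ibool
purge_may_remove_sec_sketch(
	const rec_t*	clust_rec,	/* in: clustered index record;
					the caller latches its page */
	mtr_t*		mtr,		/* in: mtr holding that latch */
	dict_index_t*	sec_index,	/* in: the secondary index */
	const dtuple_t*	ientry)		/* in: the secondary index entry */
{
	/* also_curr == TRUE: include the current version of the
	clustered record in the search, not just old versions. */
	return(!row_vers_old_has_index_entry(TRUE, clust_rec, mtr,
					     sec_index, ientry));
}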

/*****************************************************************//**
Constructs the version of a clustered index record which a consistent
read should see. We assume that the trx id stored in rec is such that
the consistent read should not see rec in its present version.
@return	DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_consistent_read(
/*===============================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	read_view_t*	view,	/*!< in: the consistent read view */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	rec_t**		old_vers)/*!< out, own: old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	rec_t*		prev_version;
	trx_id_t	trx_id;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	trx_id = row_get_rec_trx_id(rec, index, *offsets);

	ut_ad(!read_view_sees_trx_id(view, trx_id));

	rw_lock_s_lock(&(purge_sys->latch));
	version = rec;

	for (;;) {
		mem_heap_t*	heap2	= heap;
		trx_undo_rec_t*	undo_rec;
		roll_ptr_t	roll_ptr;
		undo_no_t	undo_no;
		heap = mem_heap_create(1024);

		/* If we have high-granularity consistent read view and
		creating transaction of the view is the same as trx_id in
		the record we see this record only in the case when
		undo_no of the record is < undo_no in the view. */

		if (view->type == VIEW_HIGH_GRANULARITY
		    && view->creator_trx_id == trx_id) {

			roll_ptr = row_get_rec_roll_ptr(version, index,
							*offsets);
			undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
			undo_no = trx_undo_rec_get_undo_no(undo_rec);
			mem_heap_empty(heap);

			if (view->undo_no > undo_no) {
				/* The view already sees this version: we can
				copy it to in_heap and return */

				buf = static_cast<byte*>(
					mem_heap_alloc(
						in_heap,
						rec_offs_size(*offsets)));
				*old_vers = rec_copy(buf, version, *offsets);
				rec_offs_make_valid(*old_vers, index,
						    *offsets);
				err = DB_SUCCESS;

				break;
			}
		}

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (err != DB_SUCCESS) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		*offsets = rec_get_offsets(prev_version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);

		trx_id = row_get_rec_trx_id(prev_version, index, *offsets);

		if (read_view_sees_trx_id(view, trx_id)) {

			/* The view already sees this version: we can copy
			it to in_heap and return */

			buf = static_cast<byte*>(
				mem_heap_alloc(
					in_heap, rec_offs_size(*offsets)));
			*old_vers = rec_copy(buf, prev_version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
	}/* for (;;) */

	mem_heap_free(heap);
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
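
A hedged caller sketch for the consistent-read path. It reflects the asserted contract that rec itself must not be visible in view before this function is called; the wrapper name is invented and error handling is abbreviated.

/* Illustrative only: fetch the version of a clustered index
record that a consistent read with view should see. */
static ulint
fetch_visible_version_sketch(
	const rec_t*	rec,		/* in: newest version; page
					latched by mtr */
	mtr_t*		mtr,		/* in: mtr holding that latch */
	dict_index_t*	clust_index,	/* in: clustered index */
	read_view_t*	view,		/* in: consistent read view */
	mem_heap_t*	in_heap,	/* in: heap for *old_vers */
	rec_t**		old_vers)	/* out: visible version, or NULL
					if invisible (fresh insert) */
{
	mem_heap_t*	offset_heap	= NULL;
	ulint*		offsets;
	ulint		err;

	offsets = rec_get_offsets(rec, clust_index, NULL,
				  ULINT_UNDEFINED, &offset_heap);

	/* The caller must first check visibility of rec itself;
	the function below asserts that rec is NOT visible. */
	ut_ad(!read_view_sees_trx_id(
		      view, row_get_rec_trx_id(rec, clust_index, offsets)));

	err = row_vers_build_for_consistent_read(
		rec, mtr, clust_index, &offsets, view,
		&offset_heap, in_heap, old_vers);

	if (offset_heap) {
		mem_heap_free(offset_heap);
	}

	return(err);
}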

/*****************************************************************//**
Constructs the last committed version of a clustered index record,
which should be seen by a semi-consistent read.
@return	DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_semi_consistent_read(
/*====================================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	const rec_t**	old_vers)/*!< out: rec, old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;
	trx_id_t	rec_trx_id	= 0;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	rw_lock_s_lock(&(purge_sys->latch));
	/* The S-latch on purge_sys prevents the purge view from
	changing. Thus, if we have an uncommitted transaction at
	this point, then purge cannot remove its undo log even if
	the transaction could commit now. */

	version = rec;

	for (;;) {
		trx_t*		version_trx;
		mem_heap_t*	heap2;
		rec_t*		prev_version;
		trx_id_t	version_trx_id;

		version_trx_id = row_get_rec_trx_id(version, index, *offsets);
		if (rec == version) {
			rec_trx_id = version_trx_id;
		}

		mutex_enter(&kernel_mutex);
		version_trx = trx_get_on_id(version_trx_id);
		if (version_trx
		    && (version_trx->conc_state == TRX_COMMITTED_IN_MEMORY
			|| version_trx->conc_state == TRX_NOT_STARTED)) {

			version_trx = NULL;
		}
		mutex_exit(&kernel_mutex);

		if (!version_trx) {

			/* We found a version that belongs to a
			committed transaction: return it. */

			if (rec == version) {
				*old_vers = rec;
				err = DB_SUCCESS;
				break;
			}

			/* We assume that a rolled-back transaction stays in
			TRX_ACTIVE state until all the changes have been
			rolled back and the transaction is removed from
			the global list of transactions. */

			if (rec_trx_id == version_trx_id) {
				/* The transaction was committed while
				we searched for earlier versions.
				Return the current version as a
				semi-consistent read. */

				version = rec;
				*offsets = rec_get_offsets(version,
							   index, *offsets,
							   ULINT_UNDEFINED,
							   offset_heap);
			}

			buf = static_cast<byte*>(
				mem_heap_alloc(
					in_heap, rec_offs_size(*offsets)));
			*old_vers = rec_copy(buf, version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		heap2 = heap;
		heap = mem_heap_create(1024);

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
		*offsets = rec_get_offsets(version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);
	}/* for (;;) */

	if (heap) {
		mem_heap_free(heap);
	}
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
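
Finally, a hedged sketch of a semi-consistent read caller (used, for example, when an UPDATE wants the last committed version of a row instead of waiting for a row lock); the wrapper name is invented.

/* Illustrative only: return the last committed version of rec,
or NULL via *old_vers if rec is an uncommitted fresh insert. */
static ulint
semi_consistent_fetch_sketch(
	const rec_t*	rec,		/* in: newest version; page
					latched by mtr */
	mtr_t*		mtr,		/* in: mtr holding that latch */
	dict_index_t*	clust_index,	/* in: clustered index */
	mem_heap_t*	in_heap,	/* in: heap for *old_vers */
	const rec_t**	old_vers)	/* out: last committed version */
{
	mem_heap_t*	offset_heap	= NULL;
	ulint*		offsets;
	ulint		err;

	offsets = rec_get_offsets(rec, clust_index, NULL,
				  ULINT_UNDEFINED, &offset_heap);

	err = row_vers_build_for_semi_consistent_read(
		rec, mtr, clust_index, &offsets,
		&offset_heap, in_heap, old_vers);

	/* On success, *old_vers may equal rec itself (already
	committed), point to an older committed version copied to
	in_heap, or be NULL (freshly inserted, uncommitted). */

	if (offset_heap) {
		mem_heap_free(offset_heap);
	}

	return(err);
}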