Drizzled Public API Documentation

buf0buddy.cc
1 /*****************************************************************************
2 
3 Copyright (C) 2006, 2010, Innobase Oy. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License as published by the Free Software
7 Foundation; version 2 of the License.
8 
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
11 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
12 
13 You should have received a copy of the GNU General Public License along with
14 this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
15 St, Fifth Floor, Boston, MA 02110-1301 USA
16 
17 *****************************************************************************/
18 
19 /**************************************************/
26 #define THIS_MODULE
27 #include "buf0buddy.h"
28 #ifdef UNIV_NONINL
29 # include "buf0buddy.ic"
30 #endif
31 #undef THIS_MODULE
32 #include "buf0buf.h"
33 #include "buf0lru.h"
34 #include "buf0flu.h"
35 #include "page0zip.h"
36 
37 /**********************************************************************/
40 UNIV_INLINE
41 byte*
42 buf_buddy_get(
43 /*==========*/
44  byte* page,
45  ulint size)
46 {
47  ut_ad(ut_is_2pow(size));
48  ut_ad(size >= BUF_BUDDY_LOW);
49  ut_ad(size < BUF_BUDDY_HIGH);
50  ut_ad(!ut_align_offset(page, size));
51 
52  if (((ulint) page) & size) {
53  return(page - size);
54  } else {
55  return(page + size);
56  }
57 }
58 
/**********************************************************************//**
Add a block to the head of the appropriate buddy free list
buf_pool->zip_free[i].  Caller must hold buf_pool->mutex. */
UNIV_INLINE
void
buf_buddy_add_to_free(
/*==================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	buf_page_t*	bpage,		/*!< in,own: block to be freed */
	ulint		i)		/*!< in: index of
					buf_pool->zip_free[]; the block
					size is BUF_BUDDY_LOW << i */
{
#ifdef UNIV_DEBUG_VALGRIND
	/* The current list head lives in freed memory; remember it so
	its node pointers can be marked valid while the list macros
	below touch them, and re-poisoned afterwards. */
	buf_page_t*	b = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);

	if (b) UNIV_MEM_VALID(b, BUF_BUDDY_LOW << i);
#endif /* UNIV_DEBUG_VALGRIND */

	ut_ad(buf_pool_mutex_own(buf_pool));
	/* The block must not already head the free list (would create
	a self-referencing list). */
	ut_ad(buf_pool->zip_free[i].start != bpage);
	UT_LIST_ADD_FIRST(list, buf_pool->zip_free[i], bpage);

#ifdef UNIV_DEBUG_VALGRIND
	/* Hide the free blocks from Valgrind again. */
	if (b) UNIV_MEM_FREE(b, BUF_BUDDY_LOW << i);
	UNIV_MEM_ASSERT_AND_FREE(bpage, BUF_BUDDY_LOW << i);
#endif /* UNIV_DEBUG_VALGRIND */
}
86 
/**********************************************************************//**
Remove a block from the appropriate buddy free list
buf_pool->zip_free[i].  Caller must hold buf_pool->mutex. */
UNIV_INLINE
void
buf_buddy_remove_from_free(
/*=======================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	buf_page_t*	bpage,		/*!< in: block to be removed */
	ulint		i)		/*!< in: index of
					buf_pool->zip_free[]; the block
					size is BUF_BUDDY_LOW << i */
{
#ifdef UNIV_DEBUG_VALGRIND
	/* The neighbouring list nodes live in freed memory; mark them
	valid so UT_LIST_REMOVE() may update their link pointers, and
	re-poison them afterwards. */
	buf_page_t*	prev = UT_LIST_GET_PREV(list, bpage);
	buf_page_t*	next = UT_LIST_GET_NEXT(list, bpage);

	if (prev) UNIV_MEM_VALID(prev, BUF_BUDDY_LOW << i);
	if (next) UNIV_MEM_VALID(next, BUF_BUDDY_LOW << i);

	/* Neighbours on a free list must themselves be free. */
	ut_ad(!prev || buf_page_get_state(prev) == BUF_BLOCK_ZIP_FREE);
	ut_ad(!next || buf_page_get_state(next) == BUF_BLOCK_ZIP_FREE);
#endif /* UNIV_DEBUG_VALGRIND */

	ut_ad(buf_pool_mutex_own(buf_pool));
	UT_LIST_REMOVE(list, buf_pool->zip_free[i], bpage);

#ifdef UNIV_DEBUG_VALGRIND
	/* Hide the neighbours from Valgrind again. */
	if (prev) UNIV_MEM_FREE(prev, BUF_BUDDY_LOW << i);
	if (next) UNIV_MEM_FREE(next, BUF_BUDDY_LOW << i);
#endif /* UNIV_DEBUG_VALGRIND */
}
118 
119 /**********************************************************************/
122 static
123 void*
124 buf_buddy_alloc_zip(
125 /*================*/
126  buf_pool_t* buf_pool,
127  ulint i)
128 {
129  buf_page_t* bpage;
130 
131  ut_ad(buf_pool_mutex_own(buf_pool));
132  ut_a(i < BUF_BUDDY_SIZES);
133 
134 #ifndef UNIV_DEBUG_VALGRIND
135  /* Valgrind would complain about accessing free memory. */
136  ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->zip_free[i],
137  ut_ad(buf_page_get_state(ut_list_node_313)
138  == BUF_BLOCK_ZIP_FREE)));
139 #endif /* !UNIV_DEBUG_VALGRIND */
140  bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
141 
142  if (bpage) {
143  UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i);
145 
146  buf_buddy_remove_from_free(buf_pool, bpage, i);
147  } else if (i + 1 < BUF_BUDDY_SIZES) {
148  /* Attempt to split. */
149  bpage = (buf_page_t *)buf_buddy_alloc_zip(buf_pool, i + 1);
150 
151  if (bpage) {
152  buf_page_t* buddy = (buf_page_t*)
153  (((char*) bpage) + (BUF_BUDDY_LOW << i));
154 
155  ut_ad(!buf_pool_contains_zip(buf_pool, buddy));
156  ut_d(memset(buddy, i, BUF_BUDDY_LOW << i));
157  buddy->state = BUF_BLOCK_ZIP_FREE;
158  buf_buddy_add_to_free(buf_pool, buddy, i);
159  }
160  }
161 
162 #ifdef UNIV_DEBUG
163  if (bpage) {
164  memset(bpage, ~i, BUF_BUDDY_LOW << i);
165  }
166 #endif /* UNIV_DEBUG */
167 
168  UNIV_MEM_ALLOC(bpage, BUF_BUDDY_SIZES << i);
169 
170  return(bpage);
171 }
172 
173 /**********************************************************************/
175 static
176 void
177 buf_buddy_block_free(
178 /*=================*/
179  buf_pool_t* buf_pool,
180  void* buf)
181 {
182  const ulint fold = BUF_POOL_ZIP_FOLD_PTR(buf);
183  buf_page_t* bpage;
184  buf_block_t* block;
185 
186  ut_ad(buf_pool_mutex_own(buf_pool));
187  ut_ad(!mutex_own(&buf_pool->zip_mutex));
188  ut_a(!ut_align_offset(buf, UNIV_PAGE_SIZE));
189 
190  HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage,
192  && bpage->in_zip_hash && !bpage->in_page_hash),
193  ((buf_block_t*) bpage)->frame == buf);
194  ut_a(bpage);
196  ut_ad(!bpage->in_page_hash);
197  ut_ad(bpage->in_zip_hash);
198  ut_d(bpage->in_zip_hash = FALSE);
199  HASH_DELETE(buf_page_t, hash, buf_pool->zip_hash, fold, bpage);
200 
201  ut_d(memset(buf, 0, UNIV_PAGE_SIZE));
202  UNIV_MEM_INVALID(buf, UNIV_PAGE_SIZE);
203 
204  block = (buf_block_t*) bpage;
205  mutex_enter(&block->mutex);
207  mutex_exit(&block->mutex);
208 
209  ut_ad(buf_pool->buddy_n_frames > 0);
210  ut_d(buf_pool->buddy_n_frames--);
211 }
212 
213 /**********************************************************************/
215 static
216 void
217 buf_buddy_block_register(
218 /*=====================*/
219  buf_block_t* block)
220 {
221  buf_pool_t* buf_pool = buf_pool_from_block(block);
222  const ulint fold = BUF_POOL_ZIP_FOLD(block);
223  ut_ad(buf_pool_mutex_own(buf_pool));
224  ut_ad(!mutex_own(&buf_pool->zip_mutex));
226 
228 
229  ut_a(block->frame);
230  ut_a(!ut_align_offset(block->frame, UNIV_PAGE_SIZE));
231 
232  ut_ad(!block->page.in_page_hash);
233  ut_ad(!block->page.in_zip_hash);
234  ut_d(block->page.in_zip_hash = TRUE);
235  HASH_INSERT(buf_page_t, hash, buf_pool->zip_hash, fold, &block->page);
236 
237  ut_d(buf_pool->buddy_n_frames++);
238 }
239 
240 /**********************************************************************/
243 static
244 void*
245 buf_buddy_alloc_from(
246 /*=================*/
247  buf_pool_t* buf_pool,
248  void* buf,
249  ulint i,
251  ulint j)
253 {
254  ulint offs = BUF_BUDDY_LOW << j;
255  ut_ad(j <= BUF_BUDDY_SIZES);
256  ut_ad(j >= i);
257  ut_ad(!ut_align_offset(buf, offs));
258 
259  /* Add the unused parts of the block to the free lists. */
260  while (j > i) {
261  buf_page_t* bpage;
262 
263  offs >>= 1;
264  j--;
265 
266  bpage = (buf_page_t*) ((byte*) buf + offs);
267  ut_d(memset(bpage, j, BUF_BUDDY_LOW << j));
268  bpage->state = BUF_BLOCK_ZIP_FREE;
269 #ifndef UNIV_DEBUG_VALGRIND
270  /* Valgrind would complain about accessing free memory. */
271  ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->zip_free[i],
273  ut_list_node_313)
274  == BUF_BLOCK_ZIP_FREE)));
275 #endif /* !UNIV_DEBUG_VALGRIND */
276  buf_buddy_add_to_free(buf_pool, bpage, j);
277  }
278 
279  return(buf);
280 }
281 
/**********************************************************************//**
Allocate a block of size BUF_BUDDY_LOW << i.  Tries, in order: the
buddy free lists, the buf_pool->free list, and finally (if lru is
non-NULL) evicting an uncompressed page via the LRU list, in which
case buf_pool->mutex is temporarily released.
Caller must hold buf_pool->mutex and must not hold zip_mutex.
@return	allocated block, or NULL if lru == NULL and no free block
was available without touching the LRU list */
UNIV_INTERN
void*
buf_buddy_alloc_low(
/*================*/
	buf_pool_t*	buf_pool,	/*!< in/out: buffer pool instance */
	ulint		i,		/*!< in: index of
					buf_pool->zip_free[]; block size
					is BUF_BUDDY_LOW << i */
	ibool*		lru)		/*!< out: set to TRUE if the LRU list
					was used and buf_pool->mutex was
					temporarily released; NULL if the
					LRU list must not be used */
{
	buf_block_t*	block;

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(!mutex_own(&buf_pool->zip_mutex));

	if (i < BUF_BUDDY_SIZES) {
		/* Try to allocate from the buddy system. */
		block = (buf_block_t *)buf_buddy_alloc_zip(buf_pool, i);

		if (block) {
			goto func_exit;
		}
	}

	/* Try allocating from the buf_pool->free list. */
	block = buf_LRU_get_free_only(buf_pool);

	if (block) {

		goto alloc_big;
	}

	if (!lru) {
		/* Caller forbade LRU eviction and nothing was free. */
		return(NULL);
	}

	/* Try replacing an uncompressed page in the buffer pool.
	buf_LRU_get_free_block() may do I/O, so the pool mutex is
	released around the call and reacquired afterwards. */
	buf_pool_mutex_exit(buf_pool);
	block = buf_LRU_get_free_block(buf_pool);
	*lru = TRUE;
	buf_pool_mutex_enter(buf_pool);

alloc_big:
	/* Take over a whole frame; carve out the requested size and
	return the remainder to the buddy free lists. */
	buf_buddy_block_register(block);

	block = (buf_block_t *)buf_buddy_alloc_from(buf_pool, block->frame,
						    i, BUF_BUDDY_SIZES);

func_exit:
	buf_pool->buddy_stat[i].used++;
	return(block);
}
344 
345 /**********************************************************************/
348 static
349 ibool
350 buf_buddy_relocate_block(
351 /*=====================*/
352  buf_page_t* bpage,
353  buf_page_t* dpage)
354 {
355  buf_page_t* b;
356  buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
357 
358  ut_ad(buf_pool_mutex_own(buf_pool));
359 
360  switch (buf_page_get_state(bpage)) {
361  case BUF_BLOCK_ZIP_FREE:
362  case BUF_BLOCK_NOT_USED:
364  case BUF_BLOCK_FILE_PAGE:
365  case BUF_BLOCK_MEMORY:
367  ut_error;
368  case BUF_BLOCK_ZIP_DIRTY:
369  /* Cannot relocate dirty pages. */
370  return(FALSE);
371 
372  case BUF_BLOCK_ZIP_PAGE:
373  break;
374  }
375 
376  mutex_enter(&buf_pool->zip_mutex);
377 
378  if (!buf_page_can_relocate(bpage)) {
379  mutex_exit(&buf_pool->zip_mutex);
380  return(FALSE);
381  }
382 
383  buf_relocate(bpage, dpage);
384  ut_d(bpage->state = BUF_BLOCK_ZIP_FREE);
385 
386  /* relocate buf_pool->zip_clean */
387  b = UT_LIST_GET_PREV(list, dpage);
388  UT_LIST_REMOVE(list, buf_pool->zip_clean, dpage);
389 
390  if (b) {
391  UT_LIST_INSERT_AFTER(list, buf_pool->zip_clean, b, dpage);
392  } else {
393  UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, dpage);
394  }
395 
396  UNIV_MEM_INVALID(bpage, sizeof *bpage);
397 
398  mutex_exit(&buf_pool->zip_mutex);
399  return(TRUE);
400 }
401 
/**********************************************************************//**
Try to relocate a buddy-allocated block of size BUF_BUDDY_LOW << i
from src to dst.  Decides from the size whether the block is a
compressed page frame or a buf_page_t descriptor, and dispatches
accordingly.  Caller must hold buf_pool->mutex and must not hold
zip_mutex.
@return	TRUE if relocated */
static
ibool
buf_buddy_relocate(
/*===============*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	void*		src,		/*!< in: block to relocate */
	void*		dst,		/*!< in: free block to relocate to */
	ulint		i)		/*!< in: index of
					buf_pool->zip_free[] */
{
	buf_page_t*	bpage;
	ulint		space= 0;
	ulint		page_no= 0;
	const ulint	size = BUF_BUDDY_LOW << i;
	/* Start time, used to account relocated_usec below. */
	ullint		usec = ut_time_us(NULL);

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(!mutex_own(&buf_pool->zip_mutex));
	ut_ad(!ut_align_offset(src, size));
	ut_ad(!ut_align_offset(dst, size));
	UNIV_MEM_ASSERT_W(dst, size);

	/* We assume that all memory from buf_buddy_alloc()
	is used for either compressed pages or buf_page_t
	objects covering compressed pages. */

	/* We look inside the allocated objects returned by
	buf_buddy_alloc() and assume that anything of
	PAGE_ZIP_MIN_SIZE or larger is a compressed page that contains
	a valid space_id and page_no in the page header. Should the
	fields be invalid, we will be unable to relocate the block.
	We also assume that anything that fits sizeof(buf_page_t)
	actually is a properly initialized buf_page_t object. */

	if (size >= PAGE_ZIP_MIN_SIZE) {
		/* This is a compressed page. */
		mutex_t*	mutex;

		/* The src block may be split into smaller blocks,
		some of which may be free. Thus, the
		mach_read_from_4() calls below may attempt to read
		from free memory. The memory is "owned" by the buddy
		allocator (and it has been allocated from the buffer
		pool), so there is nothing wrong about this. The
		mach_read_from_4() calls here will only trigger bogus
		Valgrind memcheck warnings in UNIV_DEBUG_VALGRIND builds. */
		space = mach_read_from_4(
			(const byte*) src + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
		page_no = mach_read_from_4(
			(const byte*) src + FIL_PAGE_OFFSET);
		/* Suppress Valgrind warnings about conditional jump
		on uninitialized value. */
		UNIV_MEM_VALID(&space, sizeof space);
		UNIV_MEM_VALID(&page_no, sizeof page_no);
		bpage = buf_page_hash_get(buf_pool, space, page_no);

		if (!bpage || bpage->zip.data != src) {
			/* The block has probably been freshly
			allocated by buf_LRU_get_free_block() but not
			added to buf_pool->page_hash yet. Obviously,
			it cannot be relocated. */

			return(FALSE);
		}

		ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));

		if (page_zip_get_size(&bpage->zip) != size) {
			/* The block is of different size. We would
			have to relocate all blocks covered by src.
			For the sake of simplicity, give up. */
			ut_ad(page_zip_get_size(&bpage->zip) < size);

			return(FALSE);
		}

		/* The block must have been allocated, but it may
		contain uninitialized data. */
		UNIV_MEM_ASSERT_W(src, size);

		mutex = buf_page_get_mutex(bpage);

		mutex_enter(mutex);

		if (buf_page_can_relocate(bpage)) {
			/* Relocate the compressed page: copy the
			payload and repoint the descriptor. */
			ut_a(bpage->zip.data == src);
			memcpy(dst, src, size);
			bpage->zip.data = (page_zip_t *)dst;
			mutex_exit(mutex);
success:
			UNIV_MEM_INVALID(src, size);
			{
				/* Record the relocation in the buddy
				statistics for this size class. */
				buf_buddy_stat_t*	buddy_stat
					= &buf_pool->buddy_stat[i];
				buddy_stat->relocated++;
				buddy_stat->relocated_usec
					+= ut_time_us(NULL) - usec;
			}
			return(TRUE);
		}

		mutex_exit(mutex);
	} else if (i == buf_buddy_get_slot(sizeof(buf_page_t))) {
		/* This must be a buf_page_t object. */
#if UNIV_WORD_SIZE == 4
		/* On 32-bit systems, there is no padding in
		buf_page_t. On other systems, Valgrind could complain
		about uninitialized pad bytes. */
		UNIV_MEM_ASSERT_RW(src, size);
#endif
		if (buf_buddy_relocate_block((buf_page_t *)src, (buf_page_t *)dst)) {

			goto success;
		}
	}

	return(FALSE);
}
524 
525 /**********************************************************************/
527 UNIV_INTERN
528 void
529 buf_buddy_free_low(
530 /*===============*/
531  buf_pool_t* buf_pool,
532  void* buf,
534  ulint i)
536 {
537  buf_page_t* bpage;
538  buf_page_t* buddy;
539 
540  ut_ad(buf_pool_mutex_own(buf_pool));
541  ut_ad(!mutex_own(&buf_pool->zip_mutex));
542  ut_ad(i <= BUF_BUDDY_SIZES);
543  ut_ad(buf_pool->buddy_stat[i].used > 0);
544 
545  buf_pool->buddy_stat[i].used--;
546 recombine:
547  UNIV_MEM_ASSERT_AND_ALLOC(buf, BUF_BUDDY_LOW << i);
548  ut_d(((buf_page_t*) buf)->state = BUF_BLOCK_ZIP_FREE);
549 
550  if (i == BUF_BUDDY_SIZES) {
551  buf_buddy_block_free(buf_pool, buf);
552  return;
553  }
554 
555  ut_ad(i < BUF_BUDDY_SIZES);
556  ut_ad(buf == ut_align_down(buf, BUF_BUDDY_LOW << i));
557  ut_ad(!buf_pool_contains_zip(buf_pool, buf));
558 
559  /* Try to combine adjacent blocks. */
560 
561  buddy = (buf_page_t*) buf_buddy_get(((byte*) buf), BUF_BUDDY_LOW << i);
562 
563 #ifndef UNIV_DEBUG_VALGRIND
564  /* Valgrind would complain about accessing free memory. */
565 
566  if (buddy->state != BUF_BLOCK_ZIP_FREE) {
567 
568  goto buddy_nonfree;
569  }
570 
571  /* The field buddy->state can only be trusted for free blocks.
572  If buddy->state == BUF_BLOCK_ZIP_FREE, the block is free if
573  it is in the free list. */
574 #endif /* !UNIV_DEBUG_VALGRIND */
575 
576  for (bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]); bpage; ) {
577  UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i);
579 
580  if (bpage == buddy) {
581 buddy_free:
582  /* The buddy is free: recombine */
583  buf_buddy_remove_from_free(buf_pool, bpage, i);
584 buddy_free2:
586  ut_ad(!buf_pool_contains_zip(buf_pool, buddy));
587  i++;
588  buf = ut_align_down(buf, BUF_BUDDY_LOW << i);
589 
590  goto recombine;
591  }
592 
593  ut_a(bpage != buf);
594 
595  {
596  buf_page_t* next = UT_LIST_GET_NEXT(list, bpage);
597  UNIV_MEM_ASSERT_AND_FREE(bpage, BUF_BUDDY_LOW << i);
598  bpage = next;
599  }
600  }
601 
602 #ifndef UNIV_DEBUG_VALGRIND
603 buddy_nonfree:
604  /* Valgrind would complain about accessing free memory. */
605  ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->zip_free[i],
606  ut_ad(buf_page_get_state(ut_list_node_313)
607  == BUF_BLOCK_ZIP_FREE)));
608 #endif /* UNIV_DEBUG_VALGRIND */
609 
610  /* The buddy is not free. Is there a free block of this size? */
611  bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);
612 
613  if (bpage) {
614  /* Remove the block from the free list, because a successful
615  buf_buddy_relocate() will overwrite bpage->list. */
616 
617  UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i);
618  buf_buddy_remove_from_free(buf_pool, bpage, i);
619 
620  /* Try to relocate the buddy of buf to the free block. */
621  if (buf_buddy_relocate(buf_pool, buddy, bpage, i)) {
622 
623  ut_d(buddy->state = BUF_BLOCK_ZIP_FREE);
624  goto buddy_free2;
625  }
626 
627  buf_buddy_add_to_free(buf_pool, bpage, i);
628 
629  /* Try to relocate the buddy of the free block to buf. */
630  buddy = (buf_page_t*) buf_buddy_get(((byte*) bpage),
631  BUF_BUDDY_LOW << i);
632 
633 #ifndef UNIV_DEBUG_VALGRIND
634  /* Valgrind would complain about accessing free memory. */
635 
636  /* The buddy must not be (completely) free, because we
637  always recombine adjacent free blocks.
638 
639  (Parts of the buddy can be free in
640  buf_pool->zip_free[j] with j < i.) */
641  ut_d(UT_LIST_VALIDATE(list, buf_page_t, buf_pool->zip_free[i],
643  ut_list_node_313)
645  && ut_list_node_313 != buddy)));
646 #endif /* !UNIV_DEBUG_VALGRIND */
647 
648  if (buf_buddy_relocate(buf_pool, buddy, buf, i)) {
649 
650  buf = bpage;
651  UNIV_MEM_VALID(bpage, BUF_BUDDY_LOW << i);
652  ut_d(buddy->state = BUF_BLOCK_ZIP_FREE);
653  goto buddy_free;
654  }
655  }
656 
657  /* Free the block to the buddy list. */
658  bpage = (buf_page_t *)buf;
659 #ifdef UNIV_DEBUG
660  if (i < buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE)) {
661  /* This area has most likely been allocated for at
662  least one compressed-only block descriptor. Check
663  that there are no live objects in the area. This is
664  not a complete check: it may yield false positives as
665  well as false negatives. Also, due to buddy blocks
666  being recombined, it is possible (although unlikely)
667  that this branch is never reached. */
668 
669  char* c;
670 
671 # ifndef UNIV_DEBUG_VALGRIND
672  /* Valgrind would complain about accessing
673  uninitialized memory. Besides, Valgrind performs a
674  more exhaustive check, at every memory access. */
675  const buf_page_t* b = buf;
676  const buf_page_t* const b_end = (buf_page_t*)
677  ((char*) b + (BUF_BUDDY_LOW << i));
678 
679  for (; b < b_end; b++) {
680  /* Avoid false positives (and cause false
681  negatives) by checking for b->space < 1000. */
682 
683  if ((b->state == BUF_BLOCK_ZIP_PAGE
684  || b->state == BUF_BLOCK_ZIP_DIRTY)
685  && b->space > 0 && b->space < 1000) {
686  fprintf(stderr,
687  "buddy dirty %p %u (%u,%u) %p,%lu\n",
688  (void*) b,
689  b->state, b->space, b->offset,
690  buf, i);
691  }
692  }
693 # endif /* !UNIV_DEBUG_VALGRIND */
694 
695  /* Scramble the block. This should make any pointers
696  invalid and trigger a segmentation violation. Because
697  the scrambling can be reversed, it may be possible to
698  track down the object pointing to the freed data by
699  dereferencing the unscrambled bpage->LRU or
700  bpage->list pointers. */
701  for (c = (char*) buf + (BUF_BUDDY_LOW << i);
702  c-- > (char*) buf; ) {
703  *c = ~*c ^ i;
704  }
705  } else {
706  /* Fill large blocks with a constant pattern. */
707  memset(bpage, i, BUF_BUDDY_LOW << i);
708  }
709 #endif /* UNIV_DEBUG */
710  bpage->state = BUF_BLOCK_ZIP_FREE;
711  buf_buddy_add_to_free(buf_pool, bpage, i);
712 }
#define buf_pool_mutex_enter(b)
Definition: buf0buf.h:1765
unsigned offset
Definition: buf0buf.h:1281
#define UT_LIST_GET_NEXT(NAME, N)
Definition: ut0lst.h:201
ib_uint64_t relocated_usec
Definition: buf0buf.h:1599
unsigned state
Definition: buf0buf.h:1284
#define UT_LIST_VALIDATE(NAME, TYPE, BASE, ASSERTION)
Definition: ut0lst.h:244
UNIV_INLINE void buf_block_set_state(buf_block_t *block, enum buf_page_state state)
unsigned space
Definition: buf0buf.h:1279
mutex_t zip_mutex
Definition: buf0buf.h:1613
buf_buddy_stat_t buddy_stat[BUF_BUDDY_SIZES_MAX+1]
Definition: buf0buf.h:1648
UNIV_INTERN void buf_LRU_block_free_non_file_page(buf_block_t *block)
Definition: buf0lru.cc:1676
UNIV_INLINE ulint ut_align_offset(const void *ptr, ulint align_no) __attribute__((const ))
#define ut_d(EXPR)
Definition: ut0dbg.h:129
UNIV_INTERN void buf_relocate(buf_page_t *bpage, buf_page_t *dpage) __attribute__((nonnull))
Definition: buf0buf.cc:1456
UNIV_INTERN ullint ut_time_us(ullint *tloc)
Definition: ut0ut.cc:194
buf_page_t page
Definition: buf0buf.h:1433
ib_uint64_t relocated
Definition: buf0buf.h:1597
#define HASH_INSERT(TYPE, NAME, TABLE, FOLD, DATA)
Definition: hash0hash.h:101
#define PAGE_ZIP_MIN_SIZE
Definition: page0types.h:56
The buffer pool structure.
Definition: buf0buf.h:1607
#define UT_LIST_REMOVE(NAME, BASE, N)
Definition: ut0lst.h:178
UNIV_INLINE ibool buf_page_can_relocate(const buf_page_t *bpage) __attribute__((pure))
UNIV_INLINE enum buf_page_state buf_block_get_state(const buf_block_t *block) __attribute__((pure))
page_zip_des_t zip
Definition: buf0buf.h:1308
mutex_t mutex
Definition: buf0buf.h:1452
UNIV_INLINE enum buf_page_state buf_page_get_state(const buf_page_t *bpage)
#define ut_is_2pow(n)
Definition: ut0ut.h:162
#define ut_a(EXPR)
Definition: ut0dbg.h:105
UNIV_INLINE void * ut_align_down(const void *ptr, ulint align_no) __attribute__((const ))
#define BUF_BUDDY_SIZES
Definition: buf0types.h:75
#define UT_LIST_GET_PREV(NAME, N)
Definition: ut0lst.h:209
#define UT_LIST_INSERT_AFTER(NAME, BASE, NODE1, NODE2)
Definition: ut0lst.h:142
UNIV_INLINE buf_page_t * buf_page_hash_get(buf_pool_t *buf_pool, ulint space, ulint offset)
#define UT_LIST_GET_FIRST(BASE)
Definition: ut0lst.h:224
#define BUF_POOL_ZIP_FOLD_PTR(ptr)
Definition: buf0buf.h:1562
#define ut_ad(EXPR)
Definition: ut0dbg.h:127
UNIV_INLINE buf_pool_t * buf_pool_from_bpage(const buf_page_t *bpage)
#define HASH_SEARCH(NAME, TABLE, FOLD, TYPE, DATA, ASSERTION, TEST)
Definition: hash0hash.h:176
#define ut_error
Definition: ut0dbg.h:115
hash_table_t * zip_hash
Definition: buf0buf.h:1637
#define FIL_PAGE_OFFSET
Definition: fil0fil.h:82
#define UT_LIST_ADD_FIRST(NAME, BASE, N)
Definition: ut0lst.h:97
UNIV_INLINE ulint mach_read_from_4(const byte *b) __attribute__((nonnull
page_zip_t * data
Definition: page0types.h:68
UNIV_INTERN buf_block_t * buf_LRU_get_free_only(buf_pool_t *buf_pool)
Definition: buf0lru.cc:825
UNIV_INLINE buf_pool_t * buf_pool_from_block(const buf_block_t *block)
UNIV_INLINE mutex_t * buf_page_get_mutex(const buf_page_t *bpage) __attribute__((pure))
#define HASH_DELETE(TYPE, NAME, TABLE, FOLD, DATA)
Definition: hash0hash.h:137
#define buf_pool_mutex_own(b)
Definition: buf0buf.h:1763
#define FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID
Definition: fil0fil.h:156
byte page_zip_t
Definition: page0types.h:42
#define BUF_BUDDY_LOW
Definition: buf0types.h:71
#define buf_pool_mutex_exit(b)
Definition: buf0buf.h:1807
UNIV_INLINE ulint page_zip_get_size(const page_zip_des_t *page_zip) __attribute__((nonnull
UNIV_INTERN buf_block_t * buf_LRU_get_free_block(buf_pool_t *buf_pool) __attribute__((nonnull
#define BUF_BUDDY_HIGH
Definition: buf0types.h:82
UNIV_INTERN ibool buf_pool_watch_is_sentinel(buf_pool_t *buf_pool, const buf_page_t *bpage) UNIV_WARN_UNUSED_RESULT
Definition: buf0buf.cc:1814