Misc #14762 » 0001-gc.c-use-ccan-list.patch

normalperson (Eric Wong), 05/16/2018 01:09 AM


gc.c
@@ -496,8 +496,8 @@
 
     struct heap_page *free_pages;
     struct heap_page *using_page;
-    struct heap_page *pages;
-    struct heap_page *sweep_pages;
+    struct list_head pages;
+    struct heap_page *sweep_pos;
 #if GC_ENABLE_INCREMENTAL_MARK
     struct heap_page *pooled_pages;
 #endif
@@ -671,7 +671,6 @@
 };
 
 struct heap_page {
-    struct heap_page *prev;
     short total_slots;
     short free_slots;
     short final_slots;
@@ -685,7 +684,7 @@
     struct heap_page *free_next;
     RVALUE *start;
     RVALUE *freelist;
-    struct heap_page *next;
+    struct list_node node;
 
 #if USE_RGENGC
     bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
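
ccan/list is an intrusive doubly-linked list: the owner holds a struct list_head and each element embeds a struct list_node, and the iteration macros recover the containing struct from the embedded node (container_of style). A minimal standalone sketch of the pattern, assuming Ruby's bundled ccan/list/list.h is on the include path (demo_page is a made-up stand-in for struct heap_page):

    #include <stdio.h>
    #include "ccan/list/list.h"      /* bundled under ccan/ in the ruby tree */

    struct demo_page {               /* hypothetical stand-in for heap_page */
        int total_slots;
        struct list_node node;       /* embedded link, like heap_page.node */
    };

    int main(void)
    {
        struct list_head pages;      /* like rb_heap_t.pages */
        struct demo_page a = { 408 }, b = { 408 };
        struct demo_page *p;

        list_head_init(&pages);      /* empty list: head links to itself */
        list_add(&pages, &a.node);   /* prepends, like heap_add_page */
        list_add(&pages, &b.node);

        /* the macro walks nodes but hands back demo_page pointers */
        list_for_each(&pages, p, node)
            printf("%d slots\n", p->total_slots);
        return 0;
    }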
@@ -800,7 +799,7 @@
 #else
 #define will_be_incremental_marking(objspace) FALSE
 #endif
-#define has_sweeping_pages(heap)         ((heap)->sweep_pages != 0)
+#define has_sweeping_pages(heap)         ((heap)->sweep_pos != 0)
 #define is_lazy_sweeping(heap)           (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
 
 #if SIZEOF_LONG == SIZEOF_VOIDP
@@ -1334,6 +1333,8 @@
     rb_objspace_t *objspace = &rb_objspace;
 #endif
     malloc_limit = gc_params.malloc_limit_min;
+    list_head_init(&objspace->eden_heap.pages);
+    list_head_init(&objspace->tomb_heap.pages);
 
     return objspace;
 }
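
(list_head_init() points the head at itself, which is ccan/list's empty-list representation; the old NULL-headed chains needed no explicit setup, hence these two new calls. It also makes list_empty(), used further down, a single pointer comparison.)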
@@ -1372,7 +1373,6 @@
 
         objspace->eden_heap.total_pages = 0;
         objspace->eden_heap.total_slots = 0;
-        objspace->eden_heap.pages = NULL;
     }
     free_stack_chunks(&objspace->mark_stack);
 #if !(defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE)
@@ -1475,11 +1475,7 @@
 static void
 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
 {
-    if (page->prev) page->prev->next = page->next;
-    if (page->next) page->next->prev = page->prev;
-    if (heap->pages == page) heap->pages = page->next;
-    page->prev = NULL;
-    page->next = NULL;
+    list_del(&page->node);
     heap->total_pages--;
     heap->total_slots -= page->total_slots;
 }
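
Five lines of pointer surgery, including the head special case, become one call: with an intrusive doubly-linked list the node's own neighbors are enough to unlink it in O(1), and no head pointer is needed at all. Roughly what list_del() does internally (a simplified sketch, not the actual ccan source, which also carries debug checks):

    struct demo_node { struct demo_node *next, *prev; };

    static inline void demo_list_del(struct demo_node *n)
    {
        n->next->prev = n->prev;      /* neighbors bypass n... */
        n->prev->next = n->next;
        n->next = n->prev = NULL;     /* poison to catch use-after-delete */
    }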
@@ -1498,7 +1494,7 @@
 {
     size_t i, j;
 
-    if (heap_tomb->pages) {
+    if (!list_empty(&heap_tomb->pages)) {
         for (i = j = 1; j < heap_allocated_pages; i++) {
             struct heap_page *page = heap_pages_sorted[i];
 
@@ -1605,18 +1601,15 @@
 static struct heap_page *
 heap_page_resurrect(rb_objspace_t *objspace)
 {
-    struct heap_page *page = heap_tomb->pages;
+    struct heap_page *page = 0, *next;
 
-    while (page) {
+    list_for_each_safe(&heap_tomb->pages, page, next, node) {
         if (page->freelist != NULL) {
             heap_unlink_page(objspace, heap_tomb, page);
             return page;
         }
-        page = page->next;
     }
 
-
-
     return NULL;
 }
 
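
Note the _safe variant: heap_unlink_page() ends up calling list_del() on the current node, and plain list_for_each() would then step through the just-poisoned pointers. list_for_each_safe() caches the successor in next before running the body, so unlinking the current entry is fine. The shape of the idiom, reusing the hypothetical demo_page from the sketch above:

    /* delete-while-iterating needs the _safe variant */
    struct demo_page *p, *nxt;
    list_for_each_safe(&pages, p, nxt, node) {
        if (p->total_slots == 0)
            list_del(&p->node);   /* ok: nxt was captured already */
    }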
@@ -1643,9 +1636,7 @@
 heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
 {
     page->flags.in_tomb = (heap == heap_tomb);
-    page->next = heap->pages;
-    if (heap->pages) heap->pages->prev = page;
-    heap->pages = page;
+    list_add(&heap->pages, &page->node);
     heap->total_pages++;
     heap->total_slots += page->total_slots;
 }
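
(list_add() inserts at the front of the list, so the ordering behavior is preserved: the old code also pushed new pages onto the head of the NULL-terminated chain.)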
@@ -3649,7 +3640,7 @@
 static void
 gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
 {
-    heap->sweep_pages = heap->pages;
+    heap->sweep_pos = list_top(&heap->pages, struct heap_page, node);
     heap->free_pages = NULL;
 #if GC_ENABLE_INCREMENTAL_MARK
     heap->pooled_pages = NULL;
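
The rename from sweep_pages to sweep_pos matches the field's new role: it is now a cursor into the single heap->pages list rather than the head of a separate chain. list_top() returns the first entry or NULL for an empty list, and list_next() returns NULL past the last entry, so the NULL-terminated walk of the old code carries over unchanged. The cursor idiom, again with the hypothetical demo_page:

    /* cursor-style walk, as gc_sweep_step does with sweep_pos */
    struct demo_page *cur = list_top(&pages, struct demo_page, node);
    while (cur) {
        /* ... sweep cur ... */
        cur = list_next(&pages, cur, node);   /* NULL after the tail */
    }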
@@ -3700,7 +3691,7 @@
 static int
 gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
 {
-    struct heap_page *sweep_page = heap->sweep_pages;
+    struct heap_page *sweep_page = heap->sweep_pos;
     int unlink_limit = 3;
 #if GC_ENABLE_INCREMENTAL_MARK
     int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
@@ -3716,9 +3707,9 @@
     gc_prof_sweep_timer_start(objspace);
 #endif
 
-    while (sweep_page) {
-        struct heap_page *next_sweep_page = heap->sweep_pages = sweep_page->next;
+    do {
         int free_slots = gc_page_sweep(objspace, heap, sweep_page);
+        heap->sweep_pos = list_next(&heap->pages, sweep_page, node);
 
         if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
             heap_pages_freeable_pages > 0 &&
@@ -3748,11 +3739,9 @@
         else {
             sweep_page->free_next = NULL;
         }
+    } while ((sweep_page = heap->sweep_pos));
 
-        sweep_page = next_sweep_page;
-    }
-
-    if (heap->sweep_pages == NULL) {
+    if (!heap->sweep_pos) {
         gc_sweep_finish(objspace);
     }
 
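
(The while loop becomes do/while, which presumably relies on gc_sweep_step() only being entered while has_sweeping_pages() holds, so sweep_pos is non-NULL on entry. Note also that sweep_pos is advanced with list_next() in the previous hunk before the page can be unlinked and freed, keeping the cursor valid.)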
@@ -3810,11 +3799,10 @@
     else {
         struct heap_page *page;
         gc_sweep_start(objspace);
-        page = heap_eden->sweep_pages;
-        while (page) {
-            page->flags.before_sweep = TRUE;
-            page = page->next;
-        }
+
+        list_for_each(&heap_eden->pages, page, node) {
+            page->flags.before_sweep = TRUE;
+        }
         gc_sweep_step(objspace, heap_eden);
     }
 
@@ -5294,15 +5282,15 @@
 }
 
 static int
-gc_verify_heap_pages_(rb_objspace_t *objspace, struct heap_page *page)
+gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
 {
     int remembered_old_objects = 0;
+    struct heap_page *page = 0;
 
-    while (page) {
+    list_for_each(head, page, node) {
         if (page->flags.has_remembered_objects == FALSE) {
             remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
         }
-        page = page->next;
     }
 
     return remembered_old_objects;
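
(The new struct heap_page *page = 0; declarations here and in the following hunks only hoist the loop variable: list_for_each() assigns page itself, and the explicit initializer appears to be there to keep compilers from warning about a maybe-uninitialized variable.)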
@@ -5312,8 +5300,8 @@
 gc_verify_heap_pages(rb_objspace_t *objspace)
 {
     int remembered_old_objects = 0;
-    remembered_old_objects = gc_verify_heap_pages_(objspace, heap_eden->pages);
-    remembered_old_objects = gc_verify_heap_pages_(objspace, heap_tomb->pages);
+    remembered_old_objects = gc_verify_heap_pages_(objspace, &heap_eden->pages);
+    remembered_old_objects = gc_verify_heap_pages_(objspace, &heap_tomb->pages);
     return remembered_old_objects;
 }
 
@@ -5454,9 +5442,9 @@
 static void
 gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
 {
-    struct heap_page *page = heap_eden->pages;
+    struct heap_page *page = 0;
 
-    while (page) {
+    list_for_each(&heap_eden->pages, page, node) {
         bits_t *mark_bits = page->mark_bits;
         bits_t *wbun_bits = page->wb_unprotected_bits;
         RVALUE *p = page->start;
@@ -5481,8 +5469,6 @@
                 } while (bits);
             }
         }
-
-        page = page->next;
     }
 
     gc_mark_stacked_objects_all(objspace);
@@ -5852,13 +5838,13 @@
 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
 {
     size_t j;
-    struct heap_page *page = heap->pages;
+    struct heap_page *page = 0;
 #if PROFILE_REMEMBERSET_MARK
     int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
 #endif
     gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
 
-    while (page) {
+    list_for_each(&heap->pages, page, node) {
         if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
             RVALUE *p = page->start;
             RVALUE *offset = p - NUM_IN_PAGE(p);
@@ -5903,8 +5889,6 @@
             skip++;
         }
 #endif
-
-        page = page->next;
     }
 
 #if PROFILE_REMEMBERSET_MARK
@@ -5916,16 +5900,14 @@
 static void
 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
 {
-    struct heap_page *page = heap->pages;
+    struct heap_page *page = 0;
 
-    while (page) {
+    list_for_each(&heap->pages, page, node) {
         memset(&page->mark_bits[0],       0, HEAP_PAGE_BITMAP_SIZE);
         memset(&page->marking_bits[0],    0, HEAP_PAGE_BITMAP_SIZE);
         memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
         page->flags.has_uncollectible_shady_objects = FALSE;
         page->flags.has_remembered_objects = FALSE;
-        page = page->next;
     }
 }
 
-