Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 53 ++++++++++++++++++++++++++++++++---------------------
1 file changed, 32 insertions(+), 21 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 77dc0b25140e..3ea4f7bb1837 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2337,9 +2337,12 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
 	if (!buffer->buffers[cpu])
 		goto fail_free_buffers;
-	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
-	if (ret < 0)
-		goto fail_free_buffers;
+	/* If already mapped, do not hook to CPU hotplug */
+	if (!start) {
+		ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
+		if (ret < 0)
+			goto fail_free_buffers;
+	}
 
 	mutex_init(&buffer->mutex);
 
 	return buffer;
@@ -6725,39 +6728,38 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 	}
 
 	for_each_buffer_cpu(buffer, cpu) {
+		struct buffer_data_page *old_free_data_page;
+		struct list_head old_pages;
+		unsigned long flags;
 
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			continue;
 
 		cpu_buffer = buffer->buffers[cpu];
 
+		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
 		/* Clear the head bit to make the link list normal to read */
 		rb_head_page_deactivate(cpu_buffer);
 
-		/* Now walk the list and free all the old sub buffers */
-		list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
-			list_del_init(&bpage->list);
-			free_buffer_page(bpage);
-		}
-		/* The above loop stopped an the last page needing to be freed */
-		bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
-		free_buffer_page(bpage);
-
-		/* Free the current reader page */
-		free_buffer_page(cpu_buffer->reader_page);
+		/*
+		 * Collect buffers from the cpu_buffer pages list and the
+		 * reader_page on old_pages, so they can be freed later when not
+		 * under a spinlock. The pages list is a linked list with no
+		 * head, adding old_pages turns it into a regular list with
+		 * old_pages being the head.
+		 */
+		list_add(&old_pages, cpu_buffer->pages);
+		list_add(&cpu_buffer->reader_page->list, &old_pages);
 
 		/* One page was allocated for the reader page */
 		cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
 						     struct buffer_page, list);
 		list_del_init(&cpu_buffer->reader_page->list);
 
-		/* The cpu_buffer pages are a link list with no head */
+		/* Install the new pages, remove the head from the list */
 		cpu_buffer->pages = cpu_buffer->new_pages.next;
-		cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
-		cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
-
-		/* Clear the new_pages list */
-		INIT_LIST_HEAD(&cpu_buffer->new_pages);
+		list_del_init(&cpu_buffer->new_pages);
 
 		cpu_buffer->head_page
 			= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -6766,11 +6768,20 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 		cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
 		cpu_buffer->nr_pages_to_update = 0;
 
-		free_pages((unsigned long)cpu_buffer->free_page, old_order);
+		old_free_data_page = cpu_buffer->free_page;
 		cpu_buffer->free_page = NULL;
 
 		rb_head_page_activate(cpu_buffer);
 
+		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+		/* Free old sub buffers */
+		list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
+			list_del_init(&bpage->list);
+			free_buffer_page(bpage);
+		}
+		free_pages((unsigned long)old_free_data_page, old_order);
+
 		rb_check_pages(cpu_buffer);
 	}
 
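The subtle part of this change is the new comment's "linked list with no head" trick: cpu_buffer->pages points into a circular doubly-linked ring with no dedicated head node, so splicing the on-stack old_pages head into the ring with list_add() turns it into an ordinary headed list that can be walked, and freed, only after the reader_lock is dropped. Below is a minimal userspace sketch of that pattern, not code from the patch: the list_add() helper mirrors the semantics of the kernel's <linux/list.h>, while struct page, the id field, and the surrounding scaffolding are hypothetical stand-ins for buffer_page.

#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

/* Same insert-after-head semantics as the kernel's list_add() */
static void list_add(struct list_head *item, struct list_head *head)
{
	item->next = head->next;
	item->prev = head;
	head->next->prev = item;
	head->next = item;
}

struct page { struct list_head list; int id; };	/* stand-in for buffer_page */

int main(void)
{
	struct page *p[3];
	struct list_head old_pages;	/* plays the role of old_pages above */

	/* Build a headless circular ring: every node points at its
	 * neighbors, but none of them is a dedicated head. */
	for (int i = 0; i < 3; i++) {
		p[i] = malloc(sizeof(*p[i]));
		p[i]->id = i;
	}
	for (int i = 0; i < 3; i++) {
		p[i]->list.next = &p[(i + 1) % 3]->list;
		p[i]->list.prev = &p[(i + 2) % 3]->list;
	}

	/* "Under the lock": splice old_pages into the ring. The ring now
	 * has a head, so it is a regular list rooted at old_pages. */
	list_add(&old_pages, &p[0]->list);

	/* "After unlocking": walk from the head and free every node, as
	 * the list_for_each_entry_safe() loop in the patch does. */
	struct list_head *pos = old_pages.next;
	while (pos != &old_pages) {
		struct list_head *next = pos->next;
		struct page *pg = (struct page *)pos; /* list is 1st member */

		printf("freeing page %d\n", pg->id);
		free(pg);
		pos = next;
	}
	return 0;
}

The payoff is the same as in the patch: the section holding the (simulated) lock only does pointer splicing, while the potentially slow free loop runs with no lock held.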
