author     Arseny Kapoulkine <arseny.kapoulkine@gmail.com>  2014-06-01 19:12:36 +0000
committer  Arseny Kapoulkine <arseny.kapoulkine@gmail.com>  2014-06-01 19:12:36 +0000
commit     757c4943401d6a1922954cdac6ab8a901c8cbb51 (patch)
tree       e543fe4d2acd9e2b0edefbf694af6ca45032c454 /src
parent     6d43ad2870b9411c409fcd12937e02eb44b2a76c (diff)
Improve XPath allocator performance
When allocating new pages, make sure that the page has at least 1/4 of the base page size free. This ensures that we can do small allocations after big allocations (i.e. huge node lists) without doing a heap alloc.

This is important because XPath stack code always reclaims extra pages after evaluating sub-expressions, so allocating a small chunk of memory and then rolling the state back is a common case (filtering a node list using a predicate usually does this).

A better solution would involve a smarter allocation rollback strategy, but the implemented solution is simple and practical.

git-svn-id: https://pugixml.googlecode.com/svn/trunk@999 99668b35-9821-0410-8761-19e4c4f06640
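As a rough illustration of the sizing rule described above, here is a minimal standalone C++ sketch; page_capacity_for, the 4096-byte base size, and the example request sizes are hypothetical stand-ins, not part of pugixml's code:

#include <cassert>
#include <cstddef>

// Hypothetical helper mirroring the rule from the commit message: a newly
// allocated page must fit the current request and still leave at least a
// quarter of the base page size free for subsequent small allocations.
static std::size_t page_capacity_for(std::size_t request, std::size_t base_page_size)
{
	std::size_t required = request + base_page_size / 4;
	return (base_page_size > required) ? base_page_size : required;
}

int main()
{
	const std::size_t base = 4096; // assumed base XPath page size

	// Small request: the base page size is already enough.
	assert(page_capacity_for(64, base) == base);

	// Oversized request (e.g. a huge node list): the page grows, but keeps
	// base / 4 bytes of headroom so the next small allocation stays heap-free.
	assert(page_capacity_for(100000, base) == 100000 + base / 4);

	return 0;
}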
Diffstat (limited to 'src')
-rw-r--r--  src/pugixml.cpp  16
1 file changed, 11 insertions, 5 deletions
diff --git a/src/pugixml.cpp b/src/pugixml.cpp
index 754f92f..085d4e3 100644
--- a/src/pugixml.cpp
+++ b/src/pugixml.cpp
@@ -6069,6 +6069,7 @@ PUGI__NS_BEGIN
 	struct xpath_memory_block
 	{
 		xpath_memory_block* next;
+		size_t capacity;
 
 		char data[
 	#ifdef PUGIXML_MEMORY_XPATH_PAGE_SIZE
@@ -6098,12 +6099,10 @@ PUGI__NS_BEGIN
 
 		void* allocate_nothrow(size_t size)
 		{
-			const size_t block_capacity = sizeof(_root->data);
-
 			// align size so that we're able to store pointers in subsequent blocks
 			size = (size + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
 
-			if (_root_size + size <= block_capacity)
+			if (_root_size + size <= _root->capacity)
 			{
 				void* buf = _root->data + _root_size;
 				_root_size += size;
@@ -6111,13 +6110,18 @@ PUGI__NS_BEGIN
 			}
 			else
 			{
-				size_t block_data_size = (size > block_capacity) ? size : block_capacity;
-				size_t block_size = block_data_size + offsetof(xpath_memory_block, data);
+				// make sure we have at least 1/4th of the page free after allocation to satisfy subsequent allocation requests
+				size_t block_capacity_base = sizeof(_root->data);
+				size_t block_capacity_req = size + block_capacity_base / 4;
+				size_t block_capacity = (block_capacity_base > block_capacity_req) ? block_capacity_base : block_capacity_req;
+
+				size_t block_size = block_capacity + offsetof(xpath_memory_block, data);
 
 				xpath_memory_block* block = static_cast<xpath_memory_block*>(xml_memory::allocate(block_size));
 				if (!block) return 0;
 
 				block->next = _root;
+				block->capacity = block_capacity;
 
 				_root = block;
 				_root_size = size;
@@ -6258,6 +6262,7 @@ PUGI__NS_BEGIN
 		xpath_stack_data(): result(blocks + 0), temp(blocks + 1)
 		{
 			blocks[0].next = blocks[1].next = 0;
+			blocks[0].capacity = blocks[1].capacity = sizeof(blocks[0].data);
 
 			stack.result = &result;
 			stack.temp = &temp;
@@ -9990,6 +9995,7 @@ PUGI__NS_BEGIN
 		xpath_query_impl(): root(0), alloc(&block)
 		{
 			block.next = 0;
+			block.capacity = sizeof(block.data);
 		}
 
 		xpath_ast_node* root;
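To see why the headroom matters, the sketch below mimics the capture/rollback pattern the commit message refers to (evaluate a sub-expression, then reclaim its extra pages); bump_allocator and its state/capture/rollback names are hypothetical illustrations, not pugixml's internal XPath allocator API:

#include <cstddef>
#include <cstdlib>
#include <vector>

// Hypothetical bump allocator illustrating the usage pattern from the commit
// message: sub-expression evaluation allocates from pages, and the caller then
// rolls the allocator back to a previously captured state, freeing extra pages.
class bump_allocator
{
public:
	struct state { std::size_t page_index; std::size_t offset; };

	explicit bump_allocator(std::size_t page_size): _page_size(page_size), _offset(0)
	{
		_pages.push_back(std::malloc(page_size));
	}

	~bump_allocator()
	{
		for (std::size_t i = 0; i < _pages.size(); ++i) std::free(_pages[i]);
	}

	void* allocate(std::size_t size)
	{
		if (_offset + size > _page_size)
		{
			// New page; the patch above sizes real pages as
			// max(base page size, size + base page size / 4) and records the
			// capacity so later checks use the actual page size.
			_pages.push_back(std::malloc(size > _page_size ? size : _page_size));
			_offset = 0;
		}

		void* result = static_cast<char*>(_pages.back()) + _offset;
		_offset += size;
		return result;
	}

	state capture() const
	{
		state s = { _pages.size() - 1, _offset };
		return s;
	}

	void rollback(const state& s)
	{
		// Free every page allocated after the captured state was taken.
		while (_pages.size() - 1 > s.page_index)
		{
			std::free(_pages.back());
			_pages.pop_back();
		}

		_offset = s.offset;
	}

private:
	std::vector<void*> _pages;
	std::size_t _page_size;
	std::size_t _offset;
};

int main()
{
	bump_allocator alloc(4096); // assumed base page size

	// Typical predicate evaluation: capture the state, allocate a big
	// intermediate node list, then roll back once the result is filtered.
	bump_allocator::state saved = alloc.capture();
	alloc.allocate(100000); // huge intermediate node list
	alloc.rollback(saved);

	// The next small allocation is the common case the commit optimizes:
	// keeping headroom on the surviving page avoids another heap allocation.
	alloc.allocate(32);

	return 0;
}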