*****************************************************************************/
#include "TCImplementation.h"
-//#include "HeapProfiler.h" // FIXME remove
+//#define DEBUG_MEMUSAGE
+#ifdef DEBUG_MEMUSAGE
+#include "HeapProfiler.h" // FIXME remove
+#endif
#include <iostream>
#include <map>
{
// Save file version info
-const uchar TCImplementation::versionFlag = 4;
+const uchar TCImplementation::versionFlag = 6;
/**
* Constructor inits an empty dynamic FM-index.
return false; // Empty texts are not indexed
}
-uchar* TCImplementation::GetText(DocId k) const
+uchar * TCImplementation::GetText(DocId k) const
{
assert(k < (DocId)numberOfTexts);
- TextPosition i = k;
+
+ return textStorage->GetText(k);
+/* TextPosition i = k;
string result;
// Reserve average string length to avoid reallocs
res[i] = '\0';
for (ulong j = 0; j < i; ++j)
res[i-j-1] = result[j];
- return res;
+ return res;*/
}
/*
* Not supported
TextCollection::document_result result;
result.reserve(ep-sp+1); // Try to avoid reallocation.
- ulong sampled_rank_i = 0;
// Check each occurrence
for (; sp <= ep; ++sp)
{
TextPosition i = sp;
uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+ while (c != '\0' && !sampled->access(i))
{
i = C[c]+alphabetrank->rank(c,i)-1;
c = alphabetrank->access(i);
}
else // Sampled position
{
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+ DocId docId = (*suffixDocId)[sampled->rank1(i)-1];
result.push_back(docId);
}
}
TextCollection::document_result result;
result.reserve(ep-sp+1); // Try to avoid reallocation.
- ulong sampled_rank_i = 0;
// Check each occurrence, already within [begin, end]
for (; sp <= ep; ++sp)
{
TextPosition i = sp;
uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+ while (c != '\0' && !sampled->access(i))
{
i = C[c]+alphabetrank->rank(c,i)-1;
c = alphabetrank->access(i);
}
else // Sampled position
{
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+ DocId docId = (*suffixDocId)[sampled->rank1(i)-1];
result.push_back(docId);
}
}
// We want unique document indentifiers, using std::set to collect them
std::set<DocId> resultSet;
-
- ulong sampled_rank_i = 0;
- // Check each occurrence
- for (; sp <= ep; ++sp)
- {
- TextPosition i = sp;
- uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
- {
- i = C[c]+alphabetrank->rank(c,i)-1; // LF-mapping
- c = alphabetrank->access(i);
- }
- if (c == '\0')
- {
- // Rank among the end-markers in BWT
- unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
- resultSet.insert(Doc->access(endmarkerRank));
- }
- else
- {
- DocId di = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
- assert((unsigned)di < numberOfTexts);
- resultSet.insert(di);
- }
- }
+ EnumerateDocuments(resultSet, sp, ep);
// Convert std::set to std::vector
TextCollection::document_result result(resultSet.begin(), resultSet.end());
// We want unique document indentifiers, using std::set to collect them
std::set<DocId> resultSet;
-
- ulong sampled_rank_i = 0;
- // Check each occurrence
- for (; sp <= ep; ++sp)
- {
- TextPosition i = sp;
- uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
- {
- i = C[c]+alphabetrank->rank(c,i)-1; // LF-mapping
- c = alphabetrank->access(i);
- }
- if (c == '\0')
- {
- // Rank among the end-markers in BWT
- unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
- DocId docId = Doc->access(endmarkerRank);
- if (docId >= begin && docId <= end)
- resultSet.insert(docId);
- }
- else
- {
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
- assert((unsigned)docId < numberOfTexts);
- if (docId >= begin && docId <= end)
- resultSet.insert(docId);
- }
- }
+ EnumerateDocuments(resultSet, sp, ep, begin, end);
// Convert std::set to std::vector
TextCollection::document_result result(resultSet.begin(), resultSet.end());
return EnumerateEndmarkers(sp, ep, begin, end);
}
+
+// Approximate document listing: returns the distinct documents containing
+// an occurrence of pattern with at most k mismatching characters
+// (Hamming distance <= k).  Empty pattern yields an empty result.
+// NOTE(review): name "Kmismaches" (sic) matches the declared interface.
+TextCollection::document_result TCImplementation::Kmismaches(uchar const * pattern, unsigned k) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // empty result set
+
+    // Collect every BWT interval whose suffixes match with <= k mismatches.
+    suffix_range_vector ranges;
+    kmismatches(ranges, pattern, 0, n-1, m, k);
+    std::set<DocId> resultSet; // std::set keeps the document ids unique
+
+    for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+        // Iterate through docs in [sp,ep]:
+        EnumerateDocuments(resultSet, (*it).first, (*it).second);
+
+    // Convert std::set to std::vector
+    TextCollection::document_result result(resultSet.begin(), resultSet.end());
+    return result;
+}
+
+// Approximate document listing: returns the distinct documents containing
+// an occurrence of pattern within edit distance <= k.
+// Empty pattern yields an empty result.
+TextCollection::document_result TCImplementation::Kerrors(uchar const * pattern, unsigned k) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // empty result set
+
+    suffix_range_vector ranges;
+    // Initial dynamic-programming column: dd[i] = i (cost of matching the
+    // empty text suffix against the first i pattern symbols).
+    // std::vector instead of new[]/delete[]: no leak if kerrors() throws.
+    std::vector<ulong> dd(m+1);
+    for (ulong i = 0; i < m+1; i++)
+        dd[i] = i;
+    kerrors(ranges, pattern, 0, n-1, m+k, k, &dd[0], m);
+
+    std::set<DocId> resultSet; // std::set keeps the document ids unique
+    for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+        // Iterate through docs in [sp,ep]:
+        EnumerateDocuments(resultSet, (*it).first, (*it).second);
+
+    // Convert std::set to std::vector
+    TextCollection::document_result result(resultSet.begin(), resultSet.end());
+    return result;
+}
+
+
/**
* Full result set queries
*/
full_result result;
result.reserve(ep-sp+1); // Try to avoid reallocation.
-
- ulong sampled_rank_i = 0;
- // Report each occurrence
- for (; sp <= ep; ++sp)
- {
- TextPosition i = sp;
- TextPosition dist = 0;
- uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
- {
- i = C[c]+alphabetrank->rank(c,i)-1;
- c = alphabetrank->access(i);
- ++ dist;
- }
- if (c == '\0')
- {
- // Rank among the end-markers in BWT
- unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
- DocId docId = Doc->access(endmarkerRank);
- result.push_back(make_pair(docId, dist));
- }
- else
- {
- TextPosition textPos = (*suffixes)[sampled_rank_i-1]+dist; //sampled->rank(i)-1] + dist;
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
-// textPos = textPos - (*textStartPos)[docId]; // Offset inside the text
-
- result.push_back(make_pair(docId, textPos));
- }
- }
+ EnumeratePositions(result, sp, ep);
return result;
}
full_result result;
result.reserve(ep-sp+1); // Try to avoid reallocation.
+ EnumeratePositions(result, sp, ep, begin, end);
+
+ return result;
+}
- ulong sampled_rank_i = 0;
- // Report each occurrence
- for (; sp <= ep; ++sp)
- {
- TextPosition i = sp;
- TextPosition dist = 0;
- uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
- {
- i = C[c]+alphabetrank->rank(c,i)-1;
- c = alphabetrank->access(i);
- ++ dist;
- }
- if (c == '\0')
- {
- // Rank among the end-markers in BWT
- unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
-
- // End-marker that we found belongs to the "preceeding" doc in collection:
- DocId docId = Doc->access(endmarkerRank);
- if (docId >= begin && docId <= end)
- result.push_back(make_pair(docId, dist));
- }
- else
- {
- TextPosition textPos = (*suffixes)[sampled_rank_i-1]+dist; //sampled->rank(i)-1] + dist;
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
-// textPos = textPos - (*textStartPos)[docId]; // Offset inside the text
+// Approximate full search: returns (document id, position) pairs for every
+// occurrence of pattern with at most k mismatching characters.
+TextCollection::full_result TCImplementation::FullKmismatches(uchar const * pattern, unsigned k) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::full_result(); // empty result set
-            if (docId >= begin && docId <= end)
-                result.push_back(make_pair(docId, textPos));
-        }
-    }
-
+    // Collect the matching BWT intervals; kmismatches returns the total
+    // number of matching suffixes, used to pre-size the result below.
+    suffix_range_vector ranges;
+    ulong count = kmismatches(ranges, pattern, 0, n-1, m, k);
+
+    TextCollection::full_result result;
+    result.reserve(count); // avoid reallocation.
+    for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+        // Iterate through docs in [sp,ep]:
+        EnumeratePositions(result, (*it).first, (*it).second);
+    return result;
+}
+
+// Approximate full search: returns (document id, position) pairs for every
+// occurrence of pattern within edit distance <= k.
+TextCollection::full_result TCImplementation::FullKerrors(uchar const * pattern, unsigned k) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::full_result(); // empty result set
+
+    suffix_range_vector ranges;
+    // Initial dynamic-programming column: dd[i] = i.
+    // std::vector instead of new[]/delete[]: no leak if kerrors() throws.
+    std::vector<ulong> dd(m+1);
+    for (unsigned i = 0; i < m+1; i++)
+        dd[i] = i;
+    ulong count = kerrors(ranges, pattern, 0, n-1, m+k, k, &dd[0], m);
+
+    TextCollection::full_result result;
+    result.reserve(count); // avoid reallocation.
+    for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+        // Iterate through docs in [sp,ep]:
+        EnumeratePositions(result, (*it).first, (*it).second);
    return result;
}
throw std::runtime_error("TCImplementation::Save(): file write error (bwt end position).");
alphabetrank->save(file);
- sampled->Save(file);
+ sampled->save(file);
suffixes->Save(file);
suffixDocId->Save(file);
throw std::runtime_error("TCImplementation::Save(): file write error (maxTextLength).");
Doc->save(file);
+ textStorage->Save(file);
fflush(file);
}
throw std::runtime_error("TCImplementation::Load(): file read error (bwt end position).");
alphabetrank = static_sequence::load(file);
- sampled = new BSGAP(file);
+ sampled = static_bitsequence::load(file);
suffixes = new BlockArray(file);
suffixDocId = new BlockArray(file);
throw std::runtime_error("TCImplementation::Load(): file read error (maxTextLength).");
Doc = static_sequence::load(file);
+ textStorage = new TextStorage(file);
// FIXME Construct data structures with new samplerate
//maketables();
/**
* Rest of the functions follow...
*/
+// Exact backward search: narrows the BWT interval [*sp, *ep] by matching
+// pattern[0..i-1] from right to left (standard FM-index LF-steps).
+// On return *sp/*ep delimit the suffixes prefixed by the pattern;
+// returns the size of that interval, or 0 if the pattern does not occur.
+ulong TCImplementation::searchPrefix(uchar const *pattern, ulong i, ulong *sp, ulong *ep) const
+{
+    int c;
+    while (*sp<=*ep && i>=1)
+    {
+        c = (int)pattern[--i];
+        // LF-mapping step for symbol c: C[c] + rank of c up to the bound.
+        *sp = C[c]+alphabetrank->rank(c,*sp-1);
+        *ep = C[c]+alphabetrank->rank(c,*ep)-1;
+    }
+    if (*sp<=*ep)
+        return *ep - *sp + 1;
+    else
+        return 0;
+}
+// Recursive k-mismatches backward search over the BWT: extends the interval
+// [sp, ep] by pattern[j-1], branching on every symbol that occurs in the
+// interval and decrementing the mismatch budget k on a non-matching symbol.
+// Matching intervals are appended to result; returns the total number of
+// matching suffixes.
+ulong TCImplementation::kmismatches(suffix_range_vector &result, uchar const *pattern, ulong sp, ulong ep, ulong j, unsigned k) const
+{
+    if (sp>ep) return 0;
+    if (j == 0) // whole pattern consumed: [sp, ep] is a match
+    {
+        result.push_back(std::make_pair(sp,ep));
+        return ep-sp+1;
+    }
+    int c;
+    ulong spnew;
+    ulong epnew;
+    int knew;
+    ulong sum=0;
+    if (k==0)
+    {
+        // Budget exhausted: the rest of the pattern must match exactly.
+        sum = searchPrefix(pattern, j, &sp, &ep);
+        if (sp<=ep)
+            result.push_back(std::make_pair(sp, ep));
+        return sum;
+    }
+    // Branch on each distinct symbol occurring inside [sp, ep].
+    vector<int> chars = alphabetrank->accessAll(sp, ep);
+    for (vector<int>::iterator it = chars.begin(); it != chars.end(); ++it)
+    {
+        if (*it == 0)
+            continue; // skip '\0'
+        c = *it;
+        // LF-mapping of the interval through symbol c.
+        spnew = C[c]+alphabetrank->rank(c,sp-1);
+        epnew = C[c]+alphabetrank->rank(c,ep)-1;
+        if (c!=pattern[j-1]) knew = (int)k-1; else knew = k;
+        if (knew>=0) sum += kmismatches(result, pattern, spnew, epnew, j-1, knew);
+    }
+    return sum;
+}
+
+// Recursive k-errors (edit distance) backward search over the BWT.
+// d is the current column of the dynamic-programming matrix against the
+// pattern; j bounds the remaining recursion depth.  Intervals of suffixes
+// within distance k are appended to result; returns the total number of
+// matching suffixes.
+// First call: kerrors(result, pattern, 0, n-1, m+k, k, d, m), where d[i]=i.
+ulong TCImplementation::kerrors(suffix_range_vector &result, uchar const *pattern, ulong sp, ulong ep, ulong j, unsigned k, ulong const *d, ulong m) const
+{
+    if (d[m]<=k) // range of suffixes with at most k-errors found
+    {
+        if (sp<=ep)
+            result.push_back(std::make_pair(sp, ep));
+        return (sp<=ep)?ep-sp+1:0ul;
+    }
+    if (sp>ep || j==0)
+        return 0;
+    // Next DP column; std::vector instead of new[]/delete[] so nothing
+    // leaks if a recursive call below throws.
+    std::vector<ulong> dnew(m+1);
+    int c;
+    ulong spnew;
+    ulong p,lowerbound;
+    ulong epnew;
+    ulong sum=0;
+    // Branch on each distinct symbol occurring inside [sp, ep].
+    vector<int> chars = alphabetrank->accessAll(sp, ep);
+    for (vector<int>::iterator it = chars.begin(); it != chars.end(); ++it)
+    {
+        if (*it == 0)
+            continue; // skip '\0'
+        c = *it;
+        // LF-mapping of the interval through symbol c.
+        spnew = C[c]+alphabetrank->rank(c,sp-1);
+        epnew = C[c]+alphabetrank->rank(c,ep)-1;
+        if (spnew>epnew) continue; // symbol c yields an empty interval
+        dnew[0]=m+k-j+1;
+        lowerbound=k+1;
+        for (p=1; p<=m; p++) {
+            // Edit-distance recurrence: deletion, insertion, match/substitution.
+            dnew[p]=myminofthree(d[p]+1,dnew[p-1]+1,(c==pattern[m-p])?d[p-1]:(d[p-1]+1));
+            if (dnew[p]<lowerbound)
+                lowerbound = dnew[p];
+        }
+        if (lowerbound<=k) // prune: this branch can no longer reach <= k errors
+            sum += kerrors(result, pattern, spnew, epnew, j-1, k, &dnew[0], m);
+    }
+    return sum;
+}
+
ulong TCImplementation::Search(uchar const * pattern, TextPosition m, TextPosition *spResult, TextPosition *epResult) const
{
{
// printf("i = %lu, c = %c, sp = %lu, ep = %lu\n", i, pattern[i], sp, ep);
c = (int)pattern[--i];
- uint result = alphabetrank->rank(c,ep);
+ uint result = alphabetrank->rankLessThan(c,ep);
if (result == ~0u)
ep = 0;
else
delete suffixes;
delete suffixDocId;
delete Doc;
+ delete textStorage;
}
void TCImplementation::makewavelet(uchar *bwt)
// delete [] bwt;
//alphabetrank = new RLWaveletTree(bwt, n); // Deletes bwt!
// std::cerr << "heap usage: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-// std::cerr << "max heap usage before WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-// HeapProfiler::ResetMaxHeapConsumption(); // FIXME remove
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "max heap usage before WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption(); // FIXME remove
+#endif
alphabet_mapper * am = new alphabet_mapper_none();
static_bitsequence_builder * bmb = new static_bitsequence_builder_rrr02(8); // FIXME samplerate?
-// static_bitsequence_builder * bmb = new static_bitsequence_builder_brw32(16); // FIXME samplerate?
wt_coder * wtc = new wt_coder_binary(bwt,n,am);
alphabetrank = new static_sequence_wvtree(bwt,n,wtc,bmb,am);
delete bmb;
bwt = 0; // already deleted
-// std::cerr << "heap usage: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-// std::cerr << "max heap usage after WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "heap usage after WT: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+ std::cerr << "max heap usage after WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#endif
}
void TCImplementation::maketables()
this->bwtEndPos = i;
}
- // Build up arrays for text length and starting positions
- // FIXME Temp, remove
- //BlockArray* textLength = new BlockArray(numberOfTexts, Tools::CeilLog2(maxTextLength));
+ // Build up array for text starting positions
BlockArray* textStartPos = new BlockArray(numberOfTexts, Tools::CeilLog2(this->n));
- //(*textLength)[0] = l;
(*textStartPos)[0] = 0;
- // Construct samples
- ulong sampleLength = (n%samplerate==0) ? n/samplerate : n/samplerate+1;
- unsigned ceilLog2n = Tools::CeilLog2(n);
- BlockArray* positions = new BlockArray(sampleLength, ceilLog2n);
- BlockArray* tmpSuffix = new BlockArray(sampleLength, ceilLog2n);
-
// Mapping from end-markers to doc ID's:
- BlockArray *endmarkerDocId = new BlockArray(numberOfTexts, Tools::CeilLog2(numberOfTexts));
+ uint *endmarkerDocId = new uint[numberOfTexts]; // FIXME Use BlockArray with static_sequence_wvtree_noptrs.
- ulong *sampledpositions = new ulong[n/W+1];
- for (ulong i=0;i<n/W+1;i++)
- sampledpositions[i]=0lu;
+ uint *sampledpositions = new uint[n/(sizeof(uint)*8)+1];
+ for (ulong i = 0; i < n / (sizeof(uint)*8) + 1; i++)
+ sampledpositions[i] = 0;
ulong x,p=bwtEndPos;
ulong sampleCount = 0;
ulongmax--;
uint alphabetrank_i_tmp =0;
- //positions:
- for (ulong i=n-1;i<ulongmax;i--) { // TODO bad solution with ulongmax?
- // i substitutes SA->GetPos(i)
+ /**
+ * First pass: populate tables textStartPos and sampledpositions.
+ */
+ for (ulong i=n-1;i<ulongmax;i--) {
+ // i substitutes SA->GetPos(i)
x=(i==n-1)?0:i+1;
if (x % samplerate == 0 && posOfSuccEndmarker - x > samplerate) {
- Tools::SetField(sampledpositions,1,p,1);
- (*positions)[sampleCount] = p;
- (*tmpSuffix)[sampleCount] = x; // FIXME remove
+ set_field(sampledpositions,1,p,1);
sampleCount ++;
}
// Record the order of end-markers in BWT:
ulong endmarkerRank = alphabetrank_i_tmp - 1;
- (*endmarkerDocId)[endmarkerRank] = textId;
+ endmarkerDocId[endmarkerRank] = (textId + 1) % numberOfTexts;
// Store text length and text start position:
if (textId < (DocId)numberOfTexts - 1)
{
- //(*textLength)[textId + 1] = posOfSuccEndmarker - x;
(*textStartPos)[textId + 1] = x; // x is the position of end-marker.
posOfSuccEndmarker = x;
}
// LF-mapping from '\0' does not work with this (pseudo) BWT (see details from Wolfgang's thesis).
p = textId; // Correct LF-mapping to the last char of the previous text.
}
- else
+ else // Now c != '\0', do LF-mapping:
p = C[c]+alphabetrank_i_tmp-1;
}
assert(textId == 0);
- sampled = new BSGAP(sampledpositions,n,true);
- sampleLength = sampled->rank(n-1);
+ sampled = new static_bitsequence_rrr02(sampledpositions, n, 16);
+ delete [] sampledpositions;
+ ulong sampleLength = sampled->rank1(n-1);
assert(sampleCount == sampleLength);
// Suffixes store an offset from the text start position
suffixes = new BlockArray(sampleLength, Tools::CeilLog2(maxTextLength));
suffixDocId = new BlockArray(sampleLength, Tools::CeilLog2(numberOfTexts));
- for(ulong i=0; i<sampleLength; i++) {
- assert((*positions)[i] < n);
- ulong j = sampled->rank((*positions)[i]);
- if (j==0) j=sampleLength;
- TextPosition textPos = (*tmpSuffix)[i];
- (*suffixDocId)[j-1] = DocIdAtTextPos(textStartPos, textPos);
-
- assert((unsigned)DocIdAtTextPos(textStartPos, textPos) < numberOfTexts);
- assert((*suffixDocId)[j-1] < numberOfTexts);
- // calculate offset from text start:
- (*suffixes)[j-1] = textPos - (*textStartPos)[(*suffixDocId)[j-1]];
+ p=bwtEndPos;
+ textId = numberOfTexts;
+
+ TextStorageBuilder tsbuilder(n);
+
+ /**
+ * Second pass: populate tables suffixes and suffixDocId.
+ */
+ for (ulong i=n-1;i<ulongmax;i--) {
+ x=(i==n-1)?0:i+1;
+
+ if (sampled->access(p)) {
+ ulong j = sampled->rank1(p)-1;
+
+ (*suffixDocId)[j] = DocIdAtTextPos(textStartPos, x);
+
+ // calculate offset from text start:
+ (*suffixes)[j] = x - (*textStartPos)[(*suffixDocId)[j]];
+ }
+
+ uchar c = alphabetrank->access(p, alphabetrank_i_tmp);
+ tsbuilder[i] = c;
+
+ if (c == '\0')
+ {
+ --textId;
+ // LF-mapping from '\0' does not work with this (pseudo) BWT (see details from Wolfgang's thesis).
+ p = textId; // Correct LF-mapping to the last char of the previous text.
+ }
+ else // Now c != '\0', do LF-mapping:
+ p = C[c]+alphabetrank_i_tmp-1;
}
- // FIXME Temp, remove
- delete tmpSuffix;
- delete positions;
-// delete textLength;
+ assert(textId == 0);
delete textStartPos;
+ textStorage = tsbuilder.InitTextStorage();
+
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "max heap usage before Doc: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption();
+#endif
- uint *tmp = new uint[numberOfTexts]; // FIXME Silly...
-// cout << "Doc: ";
- for (unsigned i = 0; i < numberOfTexts; ++i)
- {
- tmp[i] = ((*endmarkerDocId)[i] + 1) % numberOfTexts;
- // cout << tmp[i] << ", ";
- }
-// cout << endl;
- delete endmarkerDocId;
alphabet_mapper * am = new alphabet_mapper_none();
static_bitsequence_builder * bmb = new static_bitsequence_builder_brw32(16); // FIXME samplerate?
- wt_coder * wtc = new wt_coder_binary(tmp, numberOfTexts, am);
- Doc = new static_sequence_wvtree(tmp, numberOfTexts, wtc, bmb, am);
+ Doc = new static_sequence_wvtree_noptrs(endmarkerDocId, numberOfTexts, bmb, am, true);
delete bmb;
- delete [] tmp;
+ // delete [] endmarkerDocId; // already deleted in static_sequence_wvtree_noptrs!
- /* document_result res = Doc->access(1, 2, 0, 1);
- cout << "result: ";
- for (document_result::iterator it = res.begin(); it != res.end(); ++it)
- cout << *it << ", ";
- cout << endl;*/
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "max heap usage after Doc: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#endif
}