*****************************************************************************/
#include "TCImplementation.h"
-//#include "HeapProfiler.h" // FIXME remove
+//#define DEBUG_MEMUSAGE
+#ifdef DEBUG_MEMUSAGE
+#include "HeapProfiler.h" // FIXME remove
+#endif
#include <iostream>
#include <map>
{
// Save file version info
-const uchar TCImplementation::versionFlag = 4;
+const uchar TCImplementation::versionFlag = 6;
/**
* Constructor inits an empty dynamic FM-index.
* Samplerate defaults to TEXTCOLLECTION_DEFAULT_SAMPLERATE.
*/
-TCImplementation::TCImplementation(uchar * bwt, ulong length, unsigned samplerate_, unsigned numberOfTexts_, ulong maxTextLength_)
+TCImplementation::TCImplementation(uchar * bwt, ulong length, unsigned samplerate_,
+ unsigned numberOfTexts_, ulong maxTextLength_, ulong numberOfSamples_, char tsType)
: n(length), samplerate(samplerate_), alphabetrank(0), sampled(0), suffixes(0),
suffixDocId(0), numberOfTexts(numberOfTexts_), maxTextLength(maxTextLength_), Doc(0)
{
makewavelet(bwt); // Deletes bwt!
bwt = 0;
-
+
// Make sampling tables
- maketables();
+ maketables(numberOfSamples_, tsType);
}
bool TCImplementation::EmptyText(DocId k) const
return false; // Empty texts are not indexed
}
-uchar* TCImplementation::GetText(DocId k) const
+uchar * TCImplementation::GetText(DocId k) const
{
assert(k < (DocId)numberOfTexts);
- TextPosition i = k;
+
+ return textStorage->GetText(k);
+/* TextPosition i = k;
string result;
// Reserve average string length to avoid reallocs
res[i] = '\0';
for (ulong j = 0; j < i; ++j)
res[i-j-1] = result[j];
- return res;
+ return res;*/
}
/*
* Not supported
TextPosition sp = 0, ep = 0;
Search(pattern, m, &sp, &ep);
-
- TextCollection::document_result result;
- // Report end-markers in result interval
- unsigned resultSize = CountEndmarkers(sp, ep);
- if (resultSize == 0)
- return result;
-
- result.reserve(resultSize); // Try to avoid reallocation.
-
// Iterate through end-markers in [sp,ep]:
return EnumerateEndmarkers(sp, ep);
}
TextPosition sp = 0, ep = 0;
Search(pattern, m, &sp, &ep);
- TextCollection::document_result result;
-
- // Report end-markers in result interval
- unsigned resultSize = CountEndmarkers(sp, ep);
- if (resultSize == 0)
- return result;
-
- result.reserve(resultSize); // Try to avoid reallocation.
-
// Return end-markers in [sp,ep] and [begin, end]:
return EnumerateEndmarkers(sp, ep, begin, end);
}
TextCollection::document_result result;
result.reserve(ep-sp+1); // Try to avoid reallocation.
- ulong sampled_rank_i = 0;
// Check each occurrence
for (; sp <= ep; ++sp)
{
TextPosition i = sp;
uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+ while (c != '\0' && !sampled->access(i))
{
i = C[c]+alphabetrank->rank(c,i)-1;
c = alphabetrank->access(i);
}
else // Sampled position
{
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+ DocId docId = (*suffixDocId)[sampled->rank1(i)-1];
result.push_back(docId);
}
}
TextCollection::document_result result;
result.reserve(ep-sp+1); // Try to avoid reallocation.
- ulong sampled_rank_i = 0;
// Check each occurrence, already within [begin, end]
for (; sp <= ep; ++sp)
{
TextPosition i = sp;
uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+ while (c != '\0' && !sampled->access(i))
{
i = C[c]+alphabetrank->rank(c,i)-1;
c = alphabetrank->access(i);
}
else // Sampled position
{
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+ DocId docId = (*suffixDocId)[sampled->rank1(i)-1];
result.push_back(docId);
}
}
// Match including end-marker
Search(pattern, m+1, &sp, &ep);
- TextCollection::document_result result;
-
// Report end-markers in result interval
return EnumerateEndmarkers(sp, ep);
}
// Match including end-marker
Search(pattern, m+1, &sp, &ep, begin, end);
- TextCollection::document_result result;
-
// Report end-markers in result interval
return EnumerateEndmarkers(sp, ep, begin, end);
}
// We want unique document indentifiers, using std::set to collect them
std::set<DocId> resultSet;
-
- ulong sampled_rank_i = 0;
- // Check each occurrence
- for (; sp <= ep; ++sp)
- {
- TextPosition i = sp;
- uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
- {
- i = C[c]+alphabetrank->rank(c,i)-1; // LF-mapping
- c = alphabetrank->access(i);
- }
- if (c == '\0')
- {
- // Rank among the end-markers in BWT
- unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
- resultSet.insert(Doc->access(endmarkerRank));
- }
- else
- {
- DocId di = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
- assert((unsigned)di < numberOfTexts);
- resultSet.insert(di);
- }
- }
+ EnumerateDocuments(resultSet, sp, ep);
// Convert std::set to std::vector
TextCollection::document_result result(resultSet.begin(), resultSet.end());
// We want unique document indentifiers, using std::set to collect them
std::set<DocId> resultSet;
-
- ulong sampled_rank_i = 0;
- // Check each occurrence
- for (; sp <= ep; ++sp)
- {
- TextPosition i = sp;
- uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
- {
- i = C[c]+alphabetrank->rank(c,i)-1; // LF-mapping
- c = alphabetrank->access(i);
- }
- if (c == '\0')
- {
- // Rank among the end-markers in BWT
- unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
- DocId docId = Doc->access(endmarkerRank);
- if (docId >= begin && docId <= end)
- resultSet.insert(docId);
- }
- else
- {
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
- assert((unsigned)docId < numberOfTexts);
- if (docId >= begin && docId <= end)
- resultSet.insert(docId);
- }
- }
+ EnumerateDocuments(resultSet, sp, ep, begin, end);
// Convert std::set to std::vector
TextCollection::document_result result(resultSet.begin(), resultSet.end());
TextPosition sp = 0, ep = 0;
SearchLessThan(pattern, m, &sp, &ep);
- TextCollection::document_result result;
-
// Report end-markers in result interval
return EnumerateEndmarkers(sp, ep);
}
TextPosition sp = 0, ep = 0;
SearchLessThan(pattern, m, &sp, &ep);
- TextCollection::document_result result;
-
- // Report end-markers in result interval
- unsigned resultSize = CountEndmarkers(sp, ep);
- if (resultSize == 0)
- return result;
-
- result.reserve(resultSize); // Try to avoid reallocation.
-
// Iterate through end-markers in [sp,ep] and [begin, end]:
return EnumerateEndmarkers(sp, ep, begin, end);
}
+
+// Approximate document query: returns the ids of all documents containing
+// an occurrence of `pattern` within Hamming distance k (at most k
+// substitutions; no insertions/deletions — see KErrors for edit distance).
+// NOTE(review): "KMismaches" looks like a misspelling of "KMismatches";
+// presumably the declaration in TCImplementation.h uses the same spelling,
+// so renaming must be done in both places — confirm before changing.
+TextCollection::document_result TCImplementation::KMismaches(uchar const * pattern, unsigned k) const
+{
+ TextPosition m = strlen((char *)pattern);
+ if (m == 0)
+ return TextCollection::document_result(); // empty result set
+
+ // Collect all suffix ranges [sp,ep] whose suffixes match the pattern
+ // with at most k mismatching characters.
+ suffix_range_vector ranges;
+ kmismatches(ranges, pattern, 0, n-1, m, k);
+ std::set<DocId> resultSet; // std::set keeps document ids unique
+
+ for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+ // Iterate through docs in [sp,ep]:
+ EnumerateDocuments(resultSet, (*it).first, (*it).second);
+
+ // Convert std::set to std::vector
+ TextCollection::document_result result(resultSet.begin(), resultSet.end());
+ return result;
+}
+
+// Approximate document query: returns the ids of all documents containing
+// an occurrence of `pattern` within edit distance k (substitutions,
+// insertions and deletions, via the banded DP in kerrors()).
+TextCollection::document_result TCImplementation::KErrors(uchar const * pattern, unsigned k) const
+{
+ TextPosition m = strlen((char *)pattern);
+ if (m == 0)
+ return TextCollection::document_result(); // empty result set
+
+ suffix_range_vector ranges;
+ // Initial dynamic-programming column: dd[i] = i (edit distance of the
+ // empty suffix-prefix against the first i pattern characters).
+ ulong *dd = new ulong[m+1];
+ for (ulong i=0;i<m+1;i++)
+ dd[i]=i;
+ // Search depth m+k: an occurrence with k insertions can be m+k chars long.
+ kerrors(ranges, pattern, 0, n-1, m+k, k, dd, m);
+ delete [] dd;
+
+ std::set<DocId> resultSet; // std::set keeps document ids unique
+ for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+ // Iterate through docs in [sp,ep]:
+ EnumerateDocuments(resultSet, (*it).first, (*it).second);
+
+ // Convert std::set to std::vector
+ TextCollection::document_result result(resultSet.begin(), resultSet.end());
+ return result;
+}
+
+
/**
* Full result set queries
*/
full_result result;
result.reserve(ep-sp+1); // Try to avoid reallocation.
-
- ulong sampled_rank_i = 0;
- // Report each occurrence
- for (; sp <= ep; ++sp)
- {
- TextPosition i = sp;
- TextPosition dist = 0;
- uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
- {
- i = C[c]+alphabetrank->rank(c,i)-1;
- c = alphabetrank->access(i);
- ++ dist;
- }
- if (c == '\0')
- {
- // Rank among the end-markers in BWT
- unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
- DocId docId = Doc->access(endmarkerRank);
- result.push_back(make_pair(docId, dist));
- }
- else
- {
- TextPosition textPos = (*suffixes)[sampled_rank_i-1]+dist; //sampled->rank(i)-1] + dist;
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
-// textPos = textPos - (*textStartPos)[docId]; // Offset inside the text
-
- result.push_back(make_pair(docId, textPos));
- }
- }
+ EnumeratePositions(result, sp, ep);
return result;
}
full_result result;
result.reserve(ep-sp+1); // Try to avoid reallocation.
+ EnumeratePositions(result, sp, ep, begin, end);
+
+ return result;
+}
- ulong sampled_rank_i = 0;
- // Report each occurrence
- for (; sp <= ep; ++sp)
- {
- TextPosition i = sp;
- TextPosition dist = 0;
- uchar c = alphabetrank->access(i);
- while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
- {
- i = C[c]+alphabetrank->rank(c,i)-1;
- c = alphabetrank->access(i);
- ++ dist;
- }
- if (c == '\0')
- {
- // Rank among the end-markers in BWT
- unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
-
- // End-marker that we found belongs to the "preceeding" doc in collection:
- DocId docId = Doc->access(endmarkerRank);
- if (docId >= begin && docId <= end)
- result.push_back(make_pair(docId, dist));
- }
- else
- {
- TextPosition textPos = (*suffixes)[sampled_rank_i-1]+dist; //sampled->rank(i)-1] + dist;
- DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
-// textPos = textPos - (*textStartPos)[docId]; // Offset inside the text
+// Full query variant of KMismaches: returns (document id, position/dist)
+// pairs for every occurrence of `pattern` within Hamming distance k.
+TextCollection::full_result TCImplementation::FullKMismatches(uchar const * pattern, unsigned k) const
+{
+ TextPosition m = strlen((char *)pattern);
+ if (m == 0)
+ return TextCollection::full_result(); // empty result set
- if (docId >= begin && docId <= end)
- result.push_back(make_pair(docId, textPos));
- }
- }
-
+ // kmismatches returns the total number of matching suffixes, which is
+ // exactly the number of result entries we are about to produce.
+ suffix_range_vector ranges;
+ ulong count = kmismatches(ranges, pattern, 0, n-1, m, k);
+
+ TextCollection::full_result result;
+ result.reserve(count); // avoid reallocation.
+ for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+ // Iterate through docs in [sp,ep]:
+ EnumeratePositions(result, (*it).first, (*it).second);
+ return result;
+}
+
+// Full query variant of KErrors: returns (document id, position/dist)
+// pairs for every occurrence of `pattern` within edit distance k.
+TextCollection::full_result TCImplementation::FullKErrors(uchar const * pattern, unsigned k) const
+{
+ TextPosition m = strlen((char *)pattern);
+ if (m == 0)
+ return TextCollection::full_result(); // empty result set
+
+ suffix_range_vector ranges;
+ // Initial DP column dd[i] = i; search depth m+k allows occurrences
+ // lengthened by up to k insertions.
+ ulong *dd = new ulong[m+1];
+ for (unsigned i=0;i<m+1;i++)
+ dd[i]=i;
+ ulong count = kerrors(ranges, pattern, 0, n-1, m+k, k, dd, m);
+ delete [] dd;
+
+ TextCollection::full_result result;
+ result.reserve(count); // avoid reallocation.
+ for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+ // Iterate through docs in [sp,ep]:
+ EnumeratePositions(result, (*it).first, (*it).second);
    return result;
}
throw std::runtime_error("TCImplementation::Save(): file write error (bwt end position).");
alphabetrank->save(file);
- sampled->Save(file);
+ sampled->save(file);
suffixes->Save(file);
suffixDocId->Save(file);
throw std::runtime_error("TCImplementation::Save(): file write error (maxTextLength).");
Doc->save(file);
+ textStorage->Save(file);
fflush(file);
}
throw std::runtime_error("TCImplementation::Load(): file read error (bwt end position).");
alphabetrank = static_sequence::load(file);
- sampled = new BSGAP(file);
+ sampled = static_bitsequence::load(file);
suffixes = new BlockArray(file);
suffixDocId = new BlockArray(file);
throw std::runtime_error("TCImplementation::Load(): file read error (maxTextLength).");
Doc = static_sequence::load(file);
+ textStorage = TextStorage::Load(file);
// FIXME Construct data structures with new samplerate
//maketables();
/**
* Rest of the functions follow...
*/
+// FM-index backward search for the first i characters of `pattern`
+// (pattern[0..i-1]), starting from and narrowing the suffix range
+// [*sp, *ep] in place. Returns the size of the resulting range, or 0
+// if the range became empty.
+// NOTE(review): rank(c, *sp-1) underflows ulong when *sp == 0 — the same
+// idiom appears in the other search routines here, so presumably the
+// rank structure tolerates it; confirm against the alphabetrank API.
+ulong TCImplementation::searchPrefix(uchar const *pattern, ulong i, ulong *sp, ulong *ep) const
+{
+ int c;
+ while (*sp<=*ep && i>=1)
+ {
+ // Consume pattern right-to-left, one LF-mapping step per character.
+ c = (int)pattern[--i];
+ *sp = C[c]+alphabetrank->rank(c,*sp-1);
+ *ep = C[c]+alphabetrank->rank(c,*ep)-1;
+ }
+ if (*sp<=*ep)
+ return *ep - *sp + 1;
+ else
+ return 0;
+}
+
+
+// Recursive backward search allowing up to k character mismatches.
+// [sp,ep] is the current suffix range, j is the number of pattern
+// characters still to match (right-to-left). Every complete match range
+// is appended to `result`; the return value is the total count of
+// matching suffixes.
+ulong TCImplementation::kmismatches(suffix_range_vector &result, uchar const *pattern, ulong sp, ulong ep, ulong j, unsigned k) const
+{
+ if (sp>ep) return 0;
+ if (j == 0)
+ {
+ // Whole pattern consumed: record the surviving range.
+ result.push_back(std::make_pair(sp,ep));
+ return ep-sp+1;
+ }
+ int c;
+ ulong spnew;
+ ulong epnew;
+ int knew;
+ ulong sum=0;
+ if (k==0)
+ {
+ // Mismatch budget exhausted: the rest must match exactly, so fall
+ // back to plain (non-branching) backward search.
+ sum = searchPrefix(pattern, j, &sp, &ep);
+ if (sp<=ep)
+ result.push_back(std::make_pair(sp, ep));
+ return sum;
+ }
+ // Branch on every distinct character occurring in BWT[sp..ep].
+ vector<int> chars = alphabetrank->accessAll(sp, ep);
+ for (vector<int>::iterator it = chars.begin(); it != chars.end(); ++it)
+ {
+ if (*it == 0)
+ continue; // skip '\0'
+ c = *it;
+ // LF-mapping step: narrow the range to suffixes preceded by c.
+ spnew = C[c]+alphabetrank->rank(c,sp-1);
+ epnew = C[c]+alphabetrank->rank(c,ep)-1;
+ // Spend one unit of budget if c differs from the expected character.
+ if (c!=pattern[j-1]) knew = (int)k-1; else knew = k;
+ if (knew>=0) sum += kmismatches(result, pattern, spnew, epnew, j-1, knew);
+ }
+ return sum;
+}
+// Recursive backward search allowing up to k edit errors (substitutions,
+// insertions, deletions), computed with one banded dynamic-programming
+// column per recursion level. First call:
+//   kerrors(result, pattern, 0, n-1, m+k, k, d, m)  with  d[i] = i.
+// d is the DP column for the path taken so far; d[m] is the edit distance
+// of the full pattern against the characters consumed on this path.
+// Matching range [sp,ep] pairs are appended to `result`; returns the
+// total count of matching suffixes.
+ulong TCImplementation::kerrors(suffix_range_vector &result, uchar const *pattern, ulong sp, ulong ep, ulong j, unsigned k, ulong const *d, ulong m) const
+{
+ ulong sum=0;
+ if (d[m]<=k) // range of suffixes with at most k-errors found
+ {
+ if (sp<=ep)
+ result.push_back(std::make_pair(sp, ep));
+ sum += (sp<=ep)?ep-sp+1:0ul;
+ }
+ if (sp>ep || j==0)
+ return sum;
+ // Next DP column, one per recursion level (freed before returning).
+ ulong *dnew = new ulong[m+1];
+ int c;
+ ulong spnew;
+ ulong p,lowerbound;
+ ulong epnew;
+ // Branch on every distinct character occurring in BWT[sp..ep].
+ vector<int> chars = alphabetrank->accessAll(sp, ep);
+ for (vector<int>::iterator it = chars.begin(); it != chars.end(); ++it)
+ {
+ if (*it == 0)
+ continue; // skip '\0'
+ c = *it;
+ // LF-mapping step: narrow the range to suffixes preceded by c.
+ spnew = C[c]+alphabetrank->rank(c,sp-1);
+ epnew = C[c]+alphabetrank->rank(c,ep)-1;
+ if (spnew>epnew) continue;
+ // Standard edit-distance column update; pattern is indexed reversed
+ // (pattern[m-p]) because the search proceeds right-to-left.
+ dnew[0]=m+k-j+1;
+ lowerbound=k+1;
+ for (p=1; p<=m; p++) {
+ dnew[p]=myminofthree(d[p]+1,dnew[p-1]+1,(c==pattern[m-p])?d[p-1]:(d[p-1]+1));
+ if (dnew[p]<lowerbound)
+ lowerbound = dnew[p];
+ }
+ // Prune: recurse only if some cell is still within the error budget.
+ if (lowerbound<=k)
+ sum += kerrors(result, pattern, spnew, epnew, j-1, k,dnew,m);
+ }
+ delete [] dnew;
+ return sum;
+}
ulong TCImplementation::Search(uchar const * pattern, TextPosition m, TextPosition *spResult, TextPosition *epResult) const
{
// printf("i = %lu, c = %c, sp = %lu, ep = %lu\n", i, pattern[i], sp, ep);
c = (int)pattern[--i];
- uint result = alphabetrank->rank(c,ep);
+ uint result = alphabetrank->rankLessThan(c,ep);
if (result == ~0u)
ep = 0;
else
delete suffixes;
delete suffixDocId;
delete Doc;
+ delete textStorage;
}
void TCImplementation::makewavelet(uchar *bwt)
// delete [] bwt;
//alphabetrank = new RLWaveletTree(bwt, n); // Deletes bwt!
// std::cerr << "heap usage: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-// std::cerr << "max heap usage before WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-// HeapProfiler::ResetMaxHeapConsumption(); // FIXME remove
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "max heap usage before WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption();
+#endif
alphabet_mapper * am = new alphabet_mapper_none();
static_bitsequence_builder * bmb = new static_bitsequence_builder_rrr02(8); // FIXME samplerate?
-// static_bitsequence_builder * bmb = new static_bitsequence_builder_brw32(16); // FIXME samplerate?
wt_coder * wtc = new wt_coder_binary(bwt,n,am);
alphabetrank = new static_sequence_wvtree(bwt,n,wtc,bmb,am);
delete bmb;
bwt = 0; // already deleted
-// std::cerr << "heap usage: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-// std::cerr << "max heap usage after WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "heap usage after WT: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+ std::cerr << "max heap usage after WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#endif
}
-void TCImplementation::maketables()
+void TCImplementation::maketables(ulong sampleLength, char tsType)
{
// Calculate BWT end-marker position (of last inserted text)
{
this->bwtEndPos = i;
}
- // Build up arrays for text length and starting positions
- // FIXME Temp, remove
- //BlockArray* textLength = new BlockArray(numberOfTexts, Tools::CeilLog2(maxTextLength));
- BlockArray* textStartPos = new BlockArray(numberOfTexts, Tools::CeilLog2(this->n));
- //(*textLength)[0] = l;
- (*textStartPos)[0] = 0;
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "heap usage before BWT traverse: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " / " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes, " << HeapProfiler::GetHeapConsumption() << " / " << HeapProfiler::GetMaxHeapConsumption() << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption();
+#endif
- // Construct samples
- ulong sampleLength = (n%samplerate==0) ? n/samplerate : n/samplerate+1;
- unsigned ceilLog2n = Tools::CeilLog2(n);
- BlockArray* positions = new BlockArray(sampleLength, ceilLog2n);
- BlockArray* tmpSuffix = new BlockArray(sampleLength, ceilLog2n);
+ // Build up array for text starting positions
+// BlockArray* textStartPos = new BlockArray(numberOfTexts, Tools::CeilLog2(this->n));
+// (*textStartPos)[0] = 0;
// Mapping from end-markers to doc ID's:
- BlockArray *endmarkerDocId = new BlockArray(numberOfTexts, Tools::CeilLog2(numberOfTexts));
-
- ulong *sampledpositions = new ulong[n/W+1];
- for (ulong i=0;i<n/W+1;i++)
- sampledpositions[i]=0lu;
+ unsigned logNumberOfTexts = Tools::CeilLog2(numberOfTexts);
+ uint *endmarkerDocId = new uint[(numberOfTexts * logNumberOfTexts)/(8*sizeof(uint)) + 1];
+
+ BlockArray* positions = new BlockArray(sampleLength, Tools::CeilLog2(this->n));
+ uint *sampledpositions = new uint[n/(sizeof(uint)*8)+1];
+ for (ulong i = 0; i < n / (sizeof(uint)*8) + 1; i++)
+ sampledpositions[i] = 0;
ulong x,p=bwtEndPos;
ulong sampleCount = 0;
// Keeping track of text position of prev. end-marker seen
- ulong posOfSuccEndmarker = n;
+ ulong posOfSuccEndmarker = n-1;
DocId textId = numberOfTexts;
ulong ulongmax = 0;
ulongmax--;
uint alphabetrank_i_tmp =0;
- //positions:
- for (ulong i=n-1;i<ulongmax;i--) { // TODO bad solution with ulongmax?
- // i substitutes SA->GetPos(i)
+ TextStorageBuilder tsbuilder(n);
+ Tools::StartTimer();
+
+ for (ulong i=n-1;i<ulongmax;i--) {
+ // i substitutes SA->GetPos(i)
x=(i==n-1)?0:i+1;
- if (x % samplerate == 0 && posOfSuccEndmarker - x > samplerate) {
- Tools::SetField(sampledpositions,1,p,1);
- (*positions)[sampleCount] = p;
- (*tmpSuffix)[sampleCount] = x; // FIXME remove
+ uchar c = alphabetrank->access(p, alphabetrank_i_tmp);
+ tsbuilder[i] = c;
+
+ if ((posOfSuccEndmarker - i) % samplerate == 0 && c != '\0')
+ {
+ set_field(sampledpositions,1,p,1);
+ (*positions)[sampleCount] = p;
sampleCount ++;
}
- uchar c = alphabetrank->access(p, alphabetrank_i_tmp);
if (c == '\0')
{
--textId;
// Record the order of end-markers in BWT:
ulong endmarkerRank = alphabetrank_i_tmp - 1;
- (*endmarkerDocId)[endmarkerRank] = textId;
+ set_field(endmarkerDocId, logNumberOfTexts, endmarkerRank, (textId + 1) % numberOfTexts);
// Store text length and text start position:
if (textId < (DocId)numberOfTexts - 1)
{
- //(*textLength)[textId + 1] = posOfSuccEndmarker - x;
- (*textStartPos)[textId + 1] = x; // x is the position of end-marker.
- posOfSuccEndmarker = x;
+// (*textStartPos)[textId + 1] = x; // x-1 is text position of end-marker.
+ posOfSuccEndmarker = i;
}
// LF-mapping from '\0' does not work with this (pseudo) BWT (see details from Wolfgang's thesis).
p = textId; // Correct LF-mapping to the last char of the previous text.
}
- else
+ else // Now c != '\0', do LF-mapping:
p = C[c]+alphabetrank_i_tmp-1;
}
assert(textId == 0);
-
- sampled = new BSGAP(sampledpositions,n,true);
- sampleLength = sampled->rank(n-1);
+
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "heap usage before tsbuilder init: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " / " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes, " << HeapProfiler::GetHeapConsumption() << " / " << HeapProfiler::GetMaxHeapConsumption() << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption();
+#endif
+
+ textStorage = tsbuilder.InitTextStorage(tsType);
+
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "heap usage after tsbuilder init: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " / " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes, " << HeapProfiler::GetHeapConsumption() << " / " << HeapProfiler::GetMaxHeapConsumption() << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption();
+#endif
+
+ sampled = new static_bitsequence_rrr02(sampledpositions, n, 16);
+ delete [] sampledpositions;
assert(sampleCount == sampleLength);
+ assert(sampled->rank1(n-1) == sampleLength);
+
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "heap usage after sampled bit vector: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " / " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes, " << HeapProfiler::GetHeapConsumption() << " / " << HeapProfiler::GetMaxHeapConsumption() << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption();
+#endif
// Suffixes store an offset from the text start position
suffixes = new BlockArray(sampleLength, Tools::CeilLog2(maxTextLength));
suffixDocId = new BlockArray(sampleLength, Tools::CeilLog2(numberOfTexts));
+ x = n - 2;
+ posOfSuccEndmarker = n-1;
for(ulong i=0; i<sampleLength; i++) {
+ // Find next sampled text position
+ while ((posOfSuccEndmarker - x) % samplerate != 0)
+ {
+ --x;
+ assert(x != ~0lu);
+ if (textStorage->IsEndmarker(x))
+ posOfSuccEndmarker = x--;
+ }
assert((*positions)[i] < n);
- ulong j = sampled->rank((*positions)[i]);
- if (j==0) j=sampleLength;
- TextPosition textPos = (*tmpSuffix)[i];
- (*suffixDocId)[j-1] = DocIdAtTextPos(textStartPos, textPos);
+ ulong j = sampled->rank1((*positions)[i]);
+
+ assert(j != 0); // if (j==0) j=sampleLength;
+
+ TextPosition textPos = (x==n-1)?0:x+1;
+ (*suffixDocId)[j-1] = textStorage->DocIdAtTextPos(textPos);
- assert((unsigned)DocIdAtTextPos(textStartPos, textPos) < numberOfTexts);
assert((*suffixDocId)[j-1] < numberOfTexts);
// calculate offset from text start:
- (*suffixes)[j-1] = textPos - (*textStartPos)[(*suffixDocId)[j-1]];
+ (*suffixes)[j-1] = textPos - textStorage->TextStartPos((*suffixDocId)[j-1]);
+ --x;
+ if (x != ~0lu && textStorage->IsEndmarker(x))
+ posOfSuccEndmarker = x--;
}
- // FIXME Temp, remove
- delete tmpSuffix;
+
delete positions;
-// delete textLength;
- delete textStartPos;
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "heap usage after sampled arrays: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " / " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes, " << HeapProfiler::GetHeapConsumption() << " / " << HeapProfiler::GetMaxHeapConsumption() << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption();
+#endif
+
+ /**
+ * Second pass: check tables
+ */
+/* p=bwtEndPos;
+ textId = numberOfTexts;
+ for (ulong i=n-1;i<ulongmax;i--) {
+ x=(i==n-1)?0:i+1;
- uint *tmp = new uint[numberOfTexts]; // FIXME Silly...
-// cout << "Doc: ";
- for (unsigned i = 0; i < numberOfTexts; ++i)
- {
- tmp[i] = ((*endmarkerDocId)[i] + 1) % numberOfTexts;
- // cout << tmp[i] << ", ";
+ if (sampled->access(p)) {
+ ulong j = sampled->rank1(p)-1;
+ assert((*suffixDocId)[j] == DocIdAtTextPos(textStartPos, x));
+
+ // calculate offset from text start:
+ assert((*suffixes)[j] == x - (*textStartPos)[(*suffixDocId)[j]]);
+ }
+
+ uchar c = alphabetrank->access(p, alphabetrank_i_tmp);
+
+ if (c == '\0')
+ {
+ --textId;
+ // LF-mapping from '\0' does not work with this (pseudo) BWT (see details from Wolfgang's thesis).
+ p = textId; // Correct LF-mapping to the last char of the previous text.
+ }
+ else // Now c != '\0', do LF-mapping:
+ p = C[c]+alphabetrank_i_tmp-1;
}
-// cout << endl;
- delete endmarkerDocId;
+ assert(textId == 0);
+ delete textStartPos
+*/
+
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "max heap usage before Doc: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+ HeapProfiler::ResetMaxHeapConsumption();
+#endif
+
alphabet_mapper * am = new alphabet_mapper_none();
- static_bitsequence_builder * bmb = new static_bitsequence_builder_brw32(16); // FIXME samplerate?
- wt_coder * wtc = new wt_coder_binary(tmp, numberOfTexts, am);
- Doc = new static_sequence_wvtree(tmp, numberOfTexts, wtc, bmb, am);
+ static_bitsequence_builder * bmb = new static_bitsequence_builder_rrr02(32); // FIXME samplerate?
+ Doc = new static_sequence_wvtree_noptrs(endmarkerDocId, numberOfTexts, logNumberOfTexts, bmb, am, true);
delete bmb;
- delete [] tmp;
+ // delete [] endmarkerDocId; // already deleted in static_sequence_wvtree_noptrs!
- /* document_result res = Doc->access(1, 2, 0, 1);
- cout << "result: ";
- for (document_result::iterator it = res.begin(); it != res.end(); ++it)
- cout << *it << ", ";
- cout << endl;*/
+#ifdef DEBUG_MEMUSAGE
+ std::cerr << "max heap usage after Doc: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#endif
}