X-Git-Url: http://git.nguyen.vg/gitweb/?a=blobdiff_plain;f=TCImplementation.cpp;h=c3b430a16e7ac762420c0c8c514d9501693608b8;hb=3c8e8e6146312f6f55046c451897234fed56608d;hp=5a754c708fb971ca9f470f3a4a178d0a0b9b4d09;hpb=9db8f89e9dbf9e1d61a12d0e6094c4db0b7111e4;p=SXSI%2FTextCollection.git

diff --git a/TCImplementation.cpp b/TCImplementation.cpp
index 5a754c7..c3b430a 100644
--- a/TCImplementation.cpp
+++ b/TCImplementation.cpp
@@ -19,7 +19,10 @@
  *****************************************************************************/
 
 #include "TCImplementation.h"
-//#include "HeapProfiler.h" // FIXME remove
+//#define DEBUG_MEMUSAGE
+#ifdef DEBUG_MEMUSAGE
+#include "HeapProfiler.h" // FIXME remove
+#endif
 
 #include 
 #include 
@@ -38,7 +41,7 @@ namespace SXSI
 {
 
 // Save file version info
-const uchar TCImplementation::versionFlag = 4;
+const uchar TCImplementation::versionFlag = 6;
 
 /**
  * Constructor inits an empty dynamic FM-index.
@@ -61,10 +64,12 @@ bool TCImplementation::EmptyText(DocId k) const
     return false; // Empty texts are not indexed
 }
 
-uchar* TCImplementation::GetText(DocId k) const
+uchar * TCImplementation::GetText(DocId k) const
 {
     assert(k < (DocId)numberOfTexts);
-    TextPosition i = k;
+
+    return textStorage->GetText(k);
+/* TextPosition i = k;
 
     string result;
     // Reserve average string length to avoid reallocs
@@ -85,7 +90,7 @@ uchar* TCImplementation::GetText(DocId k) const
     res[i] = '\0';
     for (ulong j = 0; j < i; ++j)
         res[i-j-1] = result[j];
-    return res;
+    return res;*/
 }
 /*
  * Not supported
@@ -409,14 +414,13 @@ TextCollection::document_result TCImplementation::Suffix(uchar const * pattern)
     TextCollection::document_result result;
     result.reserve(ep-sp+1); // Try to avoid reallocation.
 
-    ulong sampled_rank_i = 0;
     // Check each occurrence
     for (; sp <= ep; ++sp)
     {
         TextPosition i = sp;
         uchar c = alphabetrank->access(i);
-        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+        while (c != '\0' && !sampled->access(i))
         {
             i = C[c]+alphabetrank->rank(c,i)-1;
             c = alphabetrank->access(i);
@@ -431,7 +435,7 @@ TextCollection::document_result TCImplementation::Suffix(uchar const * pattern)
         }
         else // Sampled position
         {
-            DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+            DocId docId = (*suffixDocId)[sampled->rank1(i)-1];
             result.push_back(docId);
         }
     }
@@ -452,14 +456,13 @@ TextCollection::document_result TCImplementation::Suffix(uchar const * pattern,
     TextCollection::document_result result;
     result.reserve(ep-sp+1); // Try to avoid reallocation.
-    ulong sampled_rank_i = 0;
     // Check each occurrence, already within [begin, end]
     for (; sp <= ep; ++sp)
     {
         TextPosition i = sp;
         uchar c = alphabetrank->access(i);
-        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+        while (c != '\0' && !sampled->access(i))
         {
             i = C[c]+alphabetrank->rank(c,i)-1;
             c = alphabetrank->access(i);
@@ -474,7 +477,7 @@ TextCollection::document_result TCImplementation::Suffix(uchar const * pattern,
         }
         else // Sampled position
         {
-            DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+            DocId docId = (*suffixDocId)[sampled->rank1(i)-1];
             result.push_back(docId);
         }
     }
@@ -524,31 +527,7 @@ TextCollection::document_result TCImplementation::Contains(uchar const * pattern
     // We want unique document identifiers, using std::set to collect them
     std::set<DocId> resultSet;
-
-    ulong sampled_rank_i = 0;
-    // Check each occurrence
-    for (; sp <= ep; ++sp)
-    {
-        TextPosition i = sp;
-        uchar c = alphabetrank->access(i);
-        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
-        {
-            i = C[c]+alphabetrank->rank(c,i)-1; // LF-mapping
-            c = alphabetrank->access(i);
-        }
-        if (c == '\0')
-        {
-            // Rank among the end-markers in BWT
-            unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
-            resultSet.insert(Doc->access(endmarkerRank));
-        }
-        else
-        {
-            DocId di = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
-            assert((unsigned)di < numberOfTexts);
-            resultSet.insert(di);
-        }
-    }
+    EnumerateDocuments(resultSet, sp, ep);
 
     // Convert std::set to std::vector
     TextCollection::document_result result(resultSet.begin(), resultSet.end());
@@ -567,34 +546,7 @@ TextCollection::document_result TCImplementation::Contains(uchar const * pattern
     // We want unique document identifiers, using std::set to collect them
     std::set<DocId> resultSet;
-
-    ulong sampled_rank_i = 0;
-    // Check each occurrence
-    for (; sp <= ep; ++sp)
-    {
-        TextPosition i = sp;
-        uchar c = alphabetrank->access(i);
-        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
-        {
-            i = C[c]+alphabetrank->rank(c,i)-1; // LF-mapping
-            c = alphabetrank->access(i);
-        }
-        if (c == '\0')
-        {
-            // Rank among the end-markers in BWT
-            unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
-            DocId docId = Doc->access(endmarkerRank);
-            if (docId >= begin && docId <= end)
-                resultSet.insert(docId);
-        }
-        else
-        {
-            DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
-            assert((unsigned)docId < numberOfTexts);
-            if (docId >= begin && docId <= end)
-                resultSet.insert(docId);
-        }
-    }
+    EnumerateDocuments(resultSet, sp, ep, begin, end);
 
     // Convert std::set to std::vector
     TextCollection::document_result result(resultSet.begin(), resultSet.end());
@@ -627,6 +579,50 @@ TextCollection::document_result TCImplementation::LessThan(uchar const * pattern
     return EnumerateEndmarkers(sp, ep, begin, end);
 }
 
+
+TextCollection::document_result TCImplementation::Kmismaches(uchar const * pattern, unsigned k) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // empty result set
+
+    suffix_range_vector ranges;
+    kmismatches(ranges, pattern, 0, n-1, m, k);
+    std::set<DocId> resultSet;
+
+    for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+        // Iterate through docs in [sp,ep]:
+        EnumerateDocuments(resultSet, (*it).first, (*it).second);
+
+    // Convert std::set to std::vector
+    TextCollection::document_result result(resultSet.begin(), resultSet.end());
+    return result;
+}
+
+TextCollection::document_result TCImplementation::Kerrors(uchar const * pattern, unsigned k) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // empty result set
+
+    suffix_range_vector ranges;
+    ulong *dd = new ulong[m+1];
+    for (ulong i=0;i<m+1;i++) dd[i]=i;
+    kerrors(ranges, pattern, 0, n-1, m+k, k, dd, m);
+    delete [] dd;
+    std::set<DocId> resultSet;
+    for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+        // Iterate through docs in [sp,ep]:
+        EnumerateDocuments(resultSet, (*it).first, (*it).second);
+
+    // Convert std::set to std::vector
+    TextCollection::document_result result(resultSet.begin(), resultSet.end());
+    return result;
+}
+
+
 /**
  * Full result set queries
  */
@@ -642,36 +638,7 @@ TextCollection::full_result TCImplementation::FullContains(uchar const * pattern
     full_result result;
     result.reserve(ep-sp+1); // Try to avoid reallocation.
 
-
-    ulong sampled_rank_i = 0;
-    // Report each occurrence
-    for (; sp <= ep; ++sp)
-    {
-        TextPosition i = sp;
-        TextPosition dist = 0;
-        uchar c = alphabetrank->access(i);
-        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
-        {
-            i = C[c]+alphabetrank->rank(c,i)-1;
-            c = alphabetrank->access(i);
-            ++ dist;
-        }
-        if (c == '\0')
-        {
-            // Rank among the end-markers in BWT
-            unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
-            DocId docId = Doc->access(endmarkerRank);
-            result.push_back(make_pair(docId, dist));
-        }
-        else
-        {
-            TextPosition textPos = (*suffixes)[sampled_rank_i-1]+dist; //sampled->rank(i)-1] + dist;
-            DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
-//            textPos = textPos - (*textStartPos)[docId]; // Offset inside the text
-
-            result.push_back(make_pair(docId, textPos));
-        }
-    }
+    EnumeratePositions(result, sp, ep);
 
     return result;
 }
@@ -688,41 +655,46 @@ TextCollection::full_result TCImplementation::FullContains(uchar const * pattern
     full_result result;
     result.reserve(ep-sp+1); // Try to avoid reallocation.
+    EnumeratePositions(result, sp, ep, begin, end);
+
+    return result;
+}
 
-    ulong sampled_rank_i = 0;
-    // Report each occurrence
-    for (; sp <= ep; ++sp)
-    {
-        TextPosition i = sp;
-        TextPosition dist = 0;
-        uchar c = alphabetrank->access(i);
-        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
-        {
-            i = C[c]+alphabetrank->rank(c,i)-1;
-            c = alphabetrank->access(i);
-            ++ dist;
-        }
-        if (c == '\0')
-        {
-            // Rank among the end-markers in BWT
-            unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
-
-            // End-marker that we found belongs to the "preceeding" doc in collection:
-            DocId docId = Doc->access(endmarkerRank);
-            if (docId >= begin && docId <= end)
-                result.push_back(make_pair(docId, dist));
-        }
-        else
-        {
-            TextPosition textPos = (*suffixes)[sampled_rank_i-1]+dist; //sampled->rank(i)-1] + dist;
-            DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
-//            textPos = textPos - (*textStartPos)[docId]; // Offset inside the text
 
+TextCollection::full_result TCImplementation::FullKmismatches(uchar const * pattern, unsigned k) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::full_result(); // empty result set
 
-            if (docId >= begin && docId <= end)
-                result.push_back(make_pair(docId, textPos));
-        }
-    }
-
+    suffix_range_vector ranges;
+    ulong count = kmismatches(ranges, pattern, 0, n-1, m, k);
+
+    TextCollection::full_result result;
+    result.reserve(count); // avoid reallocation.
+    for (suffix_range_vector::iterator it = ranges.begin(); it != ranges.end(); ++it)
+        // Iterate through docs in [sp,ep]:
+        EnumeratePositions(result, (*it).first, (*it).second);
+    return result;
+}
+
+TextCollection::full_result TCImplementation::FullKerrors(uchar const * pattern, unsigned k) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::full_result(); // empty result set
+
+    suffix_range_vector ranges;
+    ulong *dd = new ulong[m+1];
+    for (unsigned i=0;isave(file);
-    sampled->Save(file);
+    sampled->save(file);
 
     suffixes->Save(file);
     suffixDocId->Save(file);
@@ -775,6 +747,7 @@ void TCImplementation::Save(FILE *file) const
         throw std::runtime_error("TCImplementation::Save(): file write error (maxTextLength).");
 
     Doc->save(file);
+    textStorage->Save(file);
     fflush(file);
 }
 
@@ -811,7 +784,7 @@ TCImplementation::TCImplementation(FILE *file, unsigned samplerate_)
         throw std::runtime_error("TCImplementation::Load(): file read error (bwt end position).");
 
     alphabetrank = static_sequence::load(file);
-    sampled = new BSGAP(file);
+    sampled = static_bitsequence::load(file);
 
     suffixes = new BlockArray(file);
     suffixDocId = new BlockArray(file);
@@ -821,6 +794,7 @@ TCImplementation::TCImplementation(FILE *file, unsigned samplerate_)
         throw std::runtime_error("TCImplementation::Load(): file read error (maxTextLength).");
 
     Doc = static_sequence::load(file);
+    textStorage = new TextStorage(file);
 
     // FIXME Construct data structures with new samplerate
     //maketables();
@@ -831,8 +805,101 @@ TCImplementation::TCImplementation(FILE *file, unsigned samplerate_)
 /**
  * Rest of the functions follow...
  */
+ulong TCImplementation::searchPrefix(uchar const *pattern, ulong i, ulong *sp, ulong *ep) const
+{
+    int c;
+    while (*sp<=*ep && i>=1)
+    {
+        c = (int)pattern[--i];
+        *sp = C[c]+alphabetrank->rank(c,*sp-1);
+        *ep = C[c]+alphabetrank->rank(c,*ep)-1;
+    }
+    if (*sp<=*ep)
+        return *ep - *sp + 1;
+    else
+        return 0;
+}
+ulong TCImplementation::kmismatches(suffix_range_vector &result, uchar const *pattern, ulong sp, ulong ep, ulong j, unsigned k) const
+{
+    if (sp>ep) return 0;
+    if (j == 0)
+    {
+        result.push_back(std::make_pair(sp,ep));
+        return ep-sp+1;
+    }
+    int c;
+    ulong spnew;
+    ulong epnew;
+    int knew;
+    ulong sum=0;
+    if (k==0)
+    {
+        sum = searchPrefix(pattern, j, &sp, &ep);
+        if (sp<=ep)
+            result.push_back(std::make_pair(sp, ep));
+        return sum;
+    }
+    vector chars = alphabetrank->accessAll(sp, ep);
+    for (vector::iterator it = chars.begin(); it != chars.end(); ++it)
+    {
+        if (*it == 0)
+            continue; // skip '\0'
+        c = *it;
+        spnew = C[c]+alphabetrank->rank(c,sp-1);
+        epnew = C[c]+alphabetrank->rank(c,ep)-1;
+        if (c!=pattern[j-1]) knew = (int)k-1; else knew = k;
+        if (knew>=0) sum += kmismatches(result, pattern, spnew, epnew, j-1, knew);
+    }
+    return sum;
+}
+
+//first call kerrors(pattern,1,n,m+k,k,d,m), where d[i]=i
+ulong TCImplementation::kerrors(suffix_range_vector &result, uchar const *pattern, ulong sp, ulong ep, ulong j, unsigned k, ulong const *d, ulong m) const
+{
+    cout << "j = " << j << ", k = " << k << ", d:";
+    for (unsigned i = 0; i < m+1; ++i)
+        cout << " " << d[i];
+    cout << endl;
+
+    if (d[m]<=k) // range of suffixes with at most k-errors found
+    {
+        if (sp<=ep)
+            result.push_back(std::make_pair(sp, ep));
+        return (sp<=ep)?ep-sp+1:0ul;
+    }
+    if (sp>ep || j==0)
+        return 0;
+    ulong *dnew = new ulong[m+1];
+    int c;
+    ulong spnew;
+    ulong p,lowerbound;
+    ulong epnew;
+    ulong sum=0;
+    vector chars = alphabetrank->accessAll(sp, ep);
+    for (vector::iterator it = chars.begin(); it != chars.end(); ++it)
+    {
+        if (*it == 0)
+            continue; // skip '\0'
+        c = *it;
+        spnew = C[c]+alphabetrank->rank(c,sp-1);
+        epnew = C[c]+alphabetrank->rank(c,ep)-1;
+        if (spnew>epnew) continue;
+        dnew[0]=m+k-j+1;
+        lowerbound=k+1;
+        for (p=1; p<=m; p++) {
+            dnew[p]=myminofthree(d[p]+1,dnew[p-1]+1,(c==pattern[m-p])?d[p-1]:(d[p-1]+1));
+            if (dnew[p]rank(c,ep);
+    uint result = alphabetrank->rankLessThan(c,ep);
     if (result == ~0u)
         ep = 0;
     else
@@ -912,6 +979,7 @@ TCImplementation::~TCImplementation() {
     delete suffixes;
     delete suffixDocId;
     delete Doc;
+    delete textStorage;
 }
 
 void TCImplementation::makewavelet(uchar *bwt)
@@ -939,20 +1007,23 @@ void TCImplementation::makewavelet(uchar *bwt)
//    delete [] bwt;
     //alphabetrank = new RLWaveletTree(bwt, n); // Deletes bwt!
 
//    std::cerr << "heap usage: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-//    std::cerr << "max heap usage before WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-//    HeapProfiler::ResetMaxHeapConsumption(); // FIXME remove
+#ifdef DEBUG_MEMUSAGE
+    std::cerr << "max heap usage before WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+    HeapProfiler::ResetMaxHeapConsumption(); // FIXME remove
+#endif
 
     alphabet_mapper * am = new alphabet_mapper_none();
     static_bitsequence_builder * bmb = new static_bitsequence_builder_rrr02(8); // FIXME samplerate?
-//    static_bitsequence_builder * bmb = new static_bitsequence_builder_brw32(16); // FIXME samplerate?
     wt_coder * wtc = new wt_coder_binary(bwt,n,am);
     alphabetrank = new static_sequence_wvtree(bwt,n,wtc,bmb,am);
     delete bmb;
 
     bwt = 0; // already deleted
-//    std::cerr << "heap usage: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
-//    std::cerr << "max heap usage after WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#ifdef DEBUG_MEMUSAGE
+    std::cerr << "heap usage after WT: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+    std::cerr << "max heap usage after WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#endif
 }
 
 void TCImplementation::maketables()
@@ -971,25 +1042,16 @@ void TCImplementation::maketables()
         this->bwtEndPos = i;
     }
 
-    // Build up arrays for text length and starting positions
-    // FIXME Temp, remove
-    //BlockArray* textLength = new BlockArray(numberOfTexts, Tools::CeilLog2(maxTextLength));
+    // Build up array for text starting positions
     BlockArray* textStartPos = new BlockArray(numberOfTexts, Tools::CeilLog2(this->n));
-    //(*textLength)[0] = l;
     (*textStartPos)[0] = 0;
 
-    // Construct samples
-    ulong sampleLength = (n%samplerate==0) ? n/samplerate : n/samplerate+1;
-    unsigned ceilLog2n = Tools::CeilLog2(n);
-    BlockArray* positions = new BlockArray(sampleLength, ceilLog2n);
-    BlockArray* tmpSuffix = new BlockArray(sampleLength, ceilLog2n);
-
     // Mapping from end-markers to doc ID's:
-    BlockArray *endmarkerDocId = new BlockArray(numberOfTexts, Tools::CeilLog2(numberOfTexts));
+    uint *endmarkerDocId = new uint[numberOfTexts]; // FIXME Use BlockArray with static_sequence_wvtree_noptrs.
 
-    ulong *sampledpositions = new ulong[n/W+1];
-    for (ulong i=0;iGetPos(i)
+    /**
+     * First pass: populate tables textStartPos and sampledpositions.
+     */
+    for (ulong i=n-1;iGetPos(i)
         x=(i==n-1)?0:i+1;
 
         if (x % samplerate == 0 && posOfSuccEndmarker - x > samplerate)
         {
-            Tools::SetField(sampledpositions,1,p,1);
-            (*positions)[sampleCount] = p;
-            (*tmpSuffix)[sampleCount] = x; // FIXME remove
+            set_field(sampledpositions,1,p,1);
             sampleCount ++;
         }
@@ -1019,12 +1081,11 @@ void TCImplementation::maketables()
             // Record the order of end-markers in BWT:
             ulong endmarkerRank = alphabetrank_i_tmp - 1;
-            (*endmarkerDocId)[endmarkerRank] = textId;
+            endmarkerDocId[endmarkerRank] = (textId + 1) % numberOfTexts;
 
             // Store text length and text start position:
             if (textId < (DocId)numberOfTexts - 1)
             {
-                //(*textLength)[textId + 1] = posOfSuccEndmarker - x;
                 (*textStartPos)[textId + 1] = x;  // x is the position of end-marker.
                 posOfSuccEndmarker = x;
             }
@@ -1032,59 +1093,71 @@ void TCImplementation::maketables()
             // LF-mapping from '\0' does not work with this (pseudo) BWT (see details from Wolfgang's thesis).
             p = textId; // Correct LF-mapping to the last char of the previous text.
         }
-        else
+        else // Now c != '\0', do LF-mapping:
             p = C[c]+alphabetrank_i_tmp-1;
     }
     assert(textId == 0);
 
-    sampled = new BSGAP(sampledpositions,n,true);
-    sampleLength = sampled->rank(n-1);
+    sampled = new static_bitsequence_rrr02(sampledpositions, n, 16);
+    delete [] sampledpositions;
+    ulong sampleLength = sampled->rank1(n-1);
     assert(sampleCount == sampleLength);
 
     // Suffixes store an offset from the text start position
     suffixes = new BlockArray(sampleLength, Tools::CeilLog2(maxTextLength));
     suffixDocId = new BlockArray(sampleLength, Tools::CeilLog2(numberOfTexts));
-    for(ulong i=0; irank((*positions)[i]);
-        if (j==0) j=sampleLength;
-        TextPosition textPos = (*tmpSuffix)[i];
-        (*suffixDocId)[j-1] = DocIdAtTextPos(textStartPos, textPos);
-
-        assert((unsigned)DocIdAtTextPos(textStartPos, textPos) < numberOfTexts);
-        assert((*suffixDocId)[j-1] < numberOfTexts);
-        // calculate offset from text start:
-        (*suffixes)[j-1] = textPos - (*textStartPos)[(*suffixDocId)[j-1]];
+    p=bwtEndPos;
+    textId = numberOfTexts;
+
+    TextStorageBuilder tsbuilder(n);
+
+    /**
+     * Second pass: populate tables suffixes and suffixDocId.
+     */
+    for (ulong i=n-1;iaccess(p)) {
+            ulong j = sampled->rank1(p)-1;
+
+            (*suffixDocId)[j] = DocIdAtTextPos(textStartPos, x);
+
+            // calculate offset from text start:
+            (*suffixes)[j] = x - (*textStartPos)[(*suffixDocId)[j]];
+        }
+
+        uchar c = alphabetrank->access(p, alphabetrank_i_tmp);
+        tsbuilder[i] = c;
+
+        if (c == '\0')
+        {
+            --textId;
+            // LF-mapping from '\0' does not work with this (pseudo) BWT (see details from Wolfgang's thesis).
+            p = textId; // Correct LF-mapping to the last char of the previous text.
+        }
+        else // Now c != '\0', do LF-mapping:
+            p = C[c]+alphabetrank_i_tmp-1;
     }
 
-    // FIXME Temp, remove
-    delete tmpSuffix;
-    delete positions;
-//    delete textLength;
+    assert(textId == 0);
 
     delete textStartPos;
+    textStorage = tsbuilder.InitTextStorage();
+
+#ifdef DEBUG_MEMUSAGE
+    std::cerr << "max heap usage before Doc: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+    HeapProfiler::ResetMaxHeapConsumption();
+#endif
 
-    uint *tmp = new uint[numberOfTexts]; // FIXME Silly...
-//    cout << "Doc: ";
-    for (unsigned i = 0; i < numberOfTexts; ++i)
-    {
-        tmp[i] = ((*endmarkerDocId)[i] + 1) % numberOfTexts;
-        // cout << tmp[i] << ", ";
-    }
-//    cout << endl;
-    delete endmarkerDocId;
 
     alphabet_mapper * am = new alphabet_mapper_none();
     static_bitsequence_builder * bmb = new static_bitsequence_builder_brw32(16); // FIXME samplerate?
-    wt_coder * wtc = new wt_coder_binary(tmp, numberOfTexts, am);
-    Doc = new static_sequence_wvtree(tmp, numberOfTexts, wtc, bmb, am);
+    Doc = new static_sequence_wvtree_noptrs(endmarkerDocId, numberOfTexts, bmb, am, true);
     delete bmb;
-    delete [] tmp;
+    // delete [] endmarkerDocId; // already deleted in static_sequence_wvtree_noptrs!
 
-    /* document_result res = Doc->access(1, 2, 0, 1);
-    cout << "result: ";
-    for (document_result::iterator it = res.begin(); it != res.end(); ++it)
-        cout << *it << ", ";
-    cout << endl;*/
+#ifdef DEBUG_MEMUSAGE
+    std::cerr << "max heap usage after Doc: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+#endif
 }
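
The searchPrefix(), kmismatches() and kerrors() routines introduced by this patch are all built on the standard FM-index backward-search step over the BWT: sp = C[c] + rank_c(sp-1), ep = C[c] + rank_c(ep) - 1. The stand-alone sketch below illustrates only that step on a toy BWT, with a naive occ() standing in for the index's wavelet-tree rank(); every name in it (bwt, C, occ, the example strings) is local to the illustration and is not part of TCImplementation or of the patch above.

// Minimal sketch of FM-index backward search (illustrative only, not library code).
#include <iostream>
#include <map>
#include <string>

// occ(c, i): number of occurrences of c in bwt[0..i] (inclusive).
// This plays the role of the wavelet tree's rank(c, i) in the real index.
static size_t occ(const std::string &bwt, char c, long i)
{
    size_t r = 0;
    for (long j = 0; j <= i; ++j)
        if (bwt[j] == c) ++r;
    return r;
}

int main()
{
    // BWT of "abracadabra$" ('$' plays the role of the '\0' end-marker).
    const std::string bwt = "ard$rcaaaabb";
    const std::string pattern = "bra";

    // C[c]: number of symbols in the text strictly smaller than c.
    std::map<char, size_t> C;
    for (char c : bwt) ++C[c];
    size_t cum = 0;
    for (auto &kv : C) { size_t cnt = kv.second; kv.second = cum; cum += cnt; }

    // Backward search: maintain the suffix-array interval [sp, ep] of
    // suffixes prefixed by the already-processed suffix of the pattern.
    long sp = 0, ep = (long)bwt.size() - 1;
    for (long i = (long)pattern.size() - 1; i >= 0 && sp <= ep; --i)
    {
        char c = pattern[i];
        sp = (long)(C[c] + occ(bwt, c, sp - 1));      // rank just before the interval
        ep = (long)(C[c] + occ(bwt, c, ep)) - 1;      // rank up to the interval end
    }

    if (sp <= ep)
        std::cout << "\"" << pattern << "\" occurs " << (ep - sp + 1)
                  << " time(s); SA interval [" << sp << ", " << ep << "]\n";
    else
        std::cout << "\"" << pattern << "\" does not occur\n";
    return 0;
}

Run as-is, the sketch reports that "bra" occurs twice in "abracadabra$" (interval [6, 7]). This is the kind of [sp, ep] interval that Suffix(), Contains() and the new k-mismatch/k-error queries then turn into document identifiers and offsets by walking LF-steps to the sampled positions (EnumerateDocuments/EnumeratePositions in the refactored code).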