X-Git-Url: http://git.nguyen.vg/gitweb/?a=blobdiff_plain;ds=sidebyside;f=CSA.cpp;h=0e1fcc9ec81492a4572ebb586b079bd0da36a25a;hb=79c1cda6f37183ad061b28a2d018b29e621ed052;hp=a190b48c73299993fd0068f29e23945c0f345195;hpb=96755a8cb6d7f1bf688115ef2ff677290c54523e;p=SXSI%2FTextCollection.git

diff --git a/CSA.cpp b/CSA.cpp
index a190b48..0e1fcc9 100644
--- a/CSA.cpp
+++ b/CSA.cpp
@@ -205,6 +205,13 @@ void CSA::MakeStatic()
         throw std::runtime_error("CSA::MakeStatic(): Data structure is already static (dynFMI == 0).");
 #endif
 
+    if (texts.size() == 0) // Empty collection
+    {
+        ++n;
+        ++numberOfTexts;
+        texts.append(1, '\0');
+    }
+
     // Bitvector of empty texts, FIXME Remove?
     {
         //std::cout << std::endl << "texts: " << numberOfTexts << ", all texts " << numberOfAllTexts << ", size : " << emptyTextId.size() <
0)
+        return true;
+    return false;
+}
+
 bool CSA::IsEqual(uchar const *pattern) const
 {
     TextPosition m = std::strlen((char *)pattern);
@@ -416,6 +459,26 @@ bool CSA::IsEqual(uchar const *pattern) const
     return false;
 }
 
+bool CSA::IsEqual(uchar const *pattern, DocId begin, DocId end) const
+{
+    TextPosition m = std::strlen((char *)pattern);
+    if (m == 0)
+    {
+        if (numberOfAllTexts - numberOfTexts > 0u)
+            return true; // Empty texts exist
+        return false; // No empty texts exist
+    }
+
+    TextPosition sp = 0, ep = 0;
+    // Match including end-marker
+    Search(pattern, m+1, &sp, &ep, begin, end);
+
+    // Check for end-marker(s) in result interval
+    if (CountEndmarkers(sp, ep))
+        return true;
+    return false;
+}
+
 bool CSA::IsContains(uchar const * pattern) const
 {
     TextPosition m = strlen((char *)pattern);
@@ -431,10 +494,25 @@ bool CSA::IsContains(uchar const * pattern) const
     return false;
 }
 
-bool CSA::IsLessThan(uchar const*) const
+bool CSA::IsContains(uchar const * pattern, DocId begin, DocId end) const
+{
+    // Here counting is as fast as existential querying
+    if (CountContains(pattern, begin, end) > 0)
+        return true; // FIXME No need to filter result set
+    return false;
+}
+
+bool CSA::IsLessThan(uchar const * pattern) const
+{
+    if (CountLessThan(pattern) > 0)
+        return true;
+    return false;
+}
+
+bool CSA::IsLessThan(uchar const * pattern, DocId begin, DocId end) const
 {
-    // TODO
-    assert(0);
+    if (CountLessThan(pattern, begin, end) > 0)
+        return true;
     return false;
 }
 
@@ -465,6 +543,19 @@ unsigned CSA::CountPrefix(uchar const * pattern) const
     return CountEndmarkers(sp, ep);
 }
 
+unsigned CSA::CountPrefix(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return numberOfAllTexts;
+
+    TextPosition sp = 0, ep = 0;
+    Search(pattern, m, &sp, &ep);
+
+    // Count end-markers in result interval
+    return CountEndmarkers(sp, ep, begin, end);
+}
+
 unsigned CSA::CountSuffix(uchar const * pattern) const
 {
     TextPosition m = strlen((char *)pattern);
@@ -478,6 +569,19 @@ unsigned CSA::CountSuffix(uchar const * pattern) const
     return count;
 }
 
+unsigned CSA::CountSuffix(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return numberOfAllTexts;
+
+    TextPosition sp = 0, ep = 0;
+    // Search with end-marker
+    unsigned count = (unsigned) Search(pattern, m+1, &sp, &ep, begin, end);
+
+    return count;
+}
+
 unsigned CSA::CountEqual(uchar const *pattern) const
 {
     TextPosition m = strlen((char const *)pattern);
@@ -492,6 +596,20 @@ unsigned CSA::CountEqual(uchar const *pattern) const
     return CountEndmarkers(sp, ep);
 }
 
+unsigned CSA::CountEqual(uchar const *pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char const *)pattern);
+    if (m == 0)
+        return numberOfAllTexts - numberOfTexts; // Empty texts.
+
+    TextPosition sp = 0, ep = 0;
+    // Match including end-marker
+    Search(pattern, m+1, &sp, &ep, begin, end);
+
+    // Count end-markers in result interval
+    return CountEndmarkers(sp, ep); // Already within [begin, end]
+}
+
 unsigned CSA::CountContains(uchar const * pattern) const
 {
     TextPosition m = strlen((char const *)pattern);
@@ -499,16 +617,48 @@ unsigned CSA::CountContains(uchar const * pattern) const
         return numberOfAllTexts; // Total number of texts.
 
     // Here counting is as slow as fetching the result set
-    // because we would have to filter out occ's that fall within same document.
+    // because we have to filter out occ's that fall within same document.
     TextCollection::document_result result = Contains(pattern);
     return result.size();
 }
 
+unsigned CSA::CountContains(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char const *)pattern);
+    if (m == 0)
+        return numberOfAllTexts; // Total number of texts.
+
+    // Here counting is as slow as fetching the result set
+    // because we have to filter out occ's that fall within same document.
+    TextCollection::document_result result = Contains(pattern, begin, end);
+    return result.size();
+}
+
+// Less than or equal
 unsigned CSA::CountLessThan(uchar const * pattern) const
 {
-    // TODO
-    assert(0);
-    return 0;
+    TextPosition m = strlen((char const *)pattern);
+    if (m == 0)
+        return numberOfAllTexts - numberOfTexts; // Empty texts.
+
+    TextPosition sp = 0, ep = 0;
+    SearchLessThan(pattern, m, &sp, &ep);
+
+    // Count end-markers in result interval
+    return CountEndmarkers(sp, ep);
+}
+
+unsigned CSA::CountLessThan(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char const *)pattern);
+    if (m == 0)
+        return numberOfAllTexts - numberOfTexts; // Empty texts.
+
+    TextPosition sp = 0, ep = 0;
+    SearchLessThan(pattern, m, &sp, &ep);
+
+    // Count end-markers in result interval
+    return CountEndmarkers(sp, ep, begin, end);
 }
 
 /**
@@ -551,6 +701,44 @@ TextCollection::document_result CSA::Prefix(uchar const * pattern) const
     return result;
 }
 
+TextCollection::document_result CSA::Prefix(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // FIXME Should return all 1...k
+
+    TextPosition sp = 0, ep = 0;
+    Search(pattern, m, &sp, &ep);
+
+    TextCollection::document_result result;
+
+    // Report end-markers in result interval
+    unsigned resultSize = CountEndmarkers(sp, ep);
+    if (resultSize == 0)
+        return result;
+
+    result.reserve(resultSize); // Try to avoid reallocation.
+
+    // Iterate through end-markers in [sp,ep] and [begin, end]:
+    unsigned i = 0;
+    if (sp > 0)
+        i = alphabetrank->rank(0, sp - 1);
+    while (resultSize)
+    {
+        // End-marker we found belongs to the "preceding" doc in the collection
+        DocId docId = ((*endmarkerDocId)[i] + 1) % numberOfTexts;
+        // Map to doc ID:
+        docId = emptyTextRank->select0(docId+1);
+        if (docId >= begin && docId <= end)
+            result.push_back(docId);
+
+        -- resultSize;
+        ++ i;
+    }
+
+    return result;
+}
+
 TextCollection::document_result CSA::Suffix(uchar const * pattern) const
 {
     TextPosition m = strlen((char *)pattern);
@@ -601,6 +789,57 @@ TextCollection::document_result CSA::Suffix(uchar const * pattern) const
     return result;
 }
 
+TextCollection::document_result CSA::Suffix(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // FIXME Should return all 1...k
+
+    TextPosition sp = 0, ep = 0;
+    // Search with end-marker
+    Search(pattern, m+1, &sp, &ep, begin, end);
+
+    TextCollection::document_result result;
+    result.reserve(ep-sp+1); // Try to avoid reallocation.
+
+    ulong sampled_rank_i = 0;
+    // Check each occurrence, already within [begin, end]
+    for (; sp <= ep; ++sp)
+    {
+        TextPosition i = sp;
+
+        uchar c = alphabetrank->access(i);
+        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+        {
+            i = C[c]+alphabetrank->rank(c,i)-1;
+            c = alphabetrank->access(i);
+        }
+        // Assert: c == '\0' OR sampled->IsBitSet(i)
+
+        if (c == '\0')
+        {
+            // Rank among the end-markers in BWT
+            unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
+
+            // End-marker that we found belongs to the "preceding" doc in collection:
+            DocId docId = ((*endmarkerDocId)[endmarkerRank] + 1) % numberOfTexts;
+            // Map to doc ID:
+            docId = emptyTextRank->select0(docId+1);
+            result.push_back(docId);
+        }
+        else // Sampled position
+        {
+            DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+            // Map to doc ID:
+            docId = emptyTextRank->select0(docId+1);
+            result.push_back(docId);
+        }
+    }
+
+    return result;
+}
+
+
 TextCollection::document_result CSA::Equal(uchar const *pattern) const
 {
     TextPosition m = strlen((char const *)pattern);
@@ -638,6 +877,43 @@ TextCollection::document_result CSA::Equal(uchar const *pattern) const
     return result;
 }
 
+TextCollection::document_result CSA::Equal(uchar const *pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char const *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // FIXME Should return all empty texts
+
+    TextPosition sp = 0, ep = 0;
+    // Match including end-marker
+    Search(pattern, m+1, &sp, &ep, begin, end);
+
+    TextCollection::document_result result;
+
+    // Report end-markers in result interval
+    unsigned resultSize = CountEndmarkers(sp, ep);
+    if (resultSize == 0)
+        return result;
+
+    result.reserve(resultSize); // Try to avoid reallocation.
+
+    unsigned i = 0;
+    if (sp > 0)
+        i = alphabetrank->rank(0, sp - 1);
+    while (resultSize)
+    {
+        // End-marker we found belongs to the "previous" doc in the collection
+        DocId docId = ((*endmarkerDocId)[i] + 1) % numberOfTexts;
+        // Map to doc ID:
+        docId = emptyTextRank->select0(docId+1);
+        result.push_back(docId); // already within [begin, end]
+
+        -- resultSize;
+        ++ i;
+    }
+
+    return result;
+}
+
 TextCollection::document_result CSA::Contains(uchar const * pattern) const
 
 {
@@ -688,11 +964,130 @@ TextCollection::document_result CSA::Contains(uchar const * pattern) const
     return result;
 }
 
+TextCollection::document_result CSA::Contains(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result();
+
+    TextPosition sp = 0, ep = 0;
+    // Search all occurrences
+    Search(pattern, m, &sp, &ep);
+
+    // We want unique document identifiers, using std::set to collect them
+    std::set<DocId> resultSet;
+
+    ulong sampled_rank_i = 0;
+    // Check each occurrence
+    for (; sp <= ep; ++sp)
+    {
+        TextPosition i = sp;
+        uchar c = alphabetrank->access(i);
+        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+        {
+            i = C[c]+alphabetrank->rank(c,i)-1; // LF-mapping
+            c = alphabetrank->access(i);
+        }
+        if (c == '\0')
+        {
+            // Rank among the end-markers in BWT
+            unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
+
+            // End-marker that we found belongs to the "preceding" doc in collection:
+            DocId docId = ((*endmarkerDocId)[endmarkerRank] + 1) % numberOfTexts;
+            if (docId >= begin && docId <= end)
+                resultSet.insert(docId);
+        }
+        else
+        {
+            DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+            assert((unsigned)docId < numberOfTexts);
+            if (docId >= begin && docId <= end)
+                resultSet.insert(docId);
+        }
+    }
+
+    // Convert std::set to std::vector
+    TextCollection::document_result result(resultSet.begin(), resultSet.end());
+    // Map to doc ID's
+    for (document_result::iterator it = result.begin(); it != result.end(); ++it)
+        *it = emptyTextRank->select0(*it+1);
+    return result;
+}
+
 TextCollection::document_result CSA::LessThan(uchar const * pattern) const
 {
-    // TODO
-    assert(0);
-    return document_result();
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // empty result set
+
+    TextPosition sp = 0, ep = 0;
+    SearchLessThan(pattern, m, &sp, &ep);
+
+    TextCollection::document_result result;
+
+    // Report end-markers in result interval
+    unsigned resultSize = CountEndmarkers(sp, ep);
+    if (resultSize == 0)
+        return result;
+
+    result.reserve(resultSize); // Try to avoid reallocation.
+
+    // Iterate through end-markers in [sp,ep]:
+    unsigned i = 0;
+    if (sp > 0)
+        i = alphabetrank->rank(0, sp - 1);
+    while (resultSize)
+    {
+        // End-marker we found belongs to the "preceding" doc in the collection
+        DocId docId = ((*endmarkerDocId)[i] + 1) % numberOfTexts;
+        // Map to doc ID:
+        docId = emptyTextRank->select0(docId+1);
+        result.push_back(docId);
+
+        -- resultSize;
+        ++ i;
+    }
+
+    return result;
+}
+
+TextCollection::document_result CSA::LessThan(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return TextCollection::document_result(); // empty result set
+
+    TextPosition sp = 0, ep = 0;
+    SearchLessThan(pattern, m, &sp, &ep);
+
+    TextCollection::document_result result;
+
+    // Report end-markers in result interval
+    unsigned resultSize = CountEndmarkers(sp, ep);
+    if (resultSize == 0)
+        return result;
+
+    result.reserve(resultSize); // Try to avoid reallocation.
+
+    // Iterate through end-markers in [sp,ep] and [begin, end]:
+    unsigned i = 0;
+    if (sp > 0)
+        i = alphabetrank->rank(0, sp - 1);
+    while (resultSize)
+    {
+        // End-marker we found belongs to the "preceding" doc in the collection
+        DocId docId = ((*endmarkerDocId)[i] + 1) % numberOfTexts;
+        // Map to doc ID:
+        docId = emptyTextRank->select0(docId+1);
+        if (docId >= begin && docId <= end)
+            result.push_back(docId);
+
+        -- resultSize;
+        ++ i;
+    }
+
+    return result;
 }
 
 /**
@@ -750,6 +1145,60 @@ TextCollection::full_result CSA::FullContains(uchar const * pattern) const
     return result;
 }
 
+TextCollection::full_result CSA::FullContains(uchar const * pattern, DocId begin, DocId end) const
+{
+    TextPosition m = strlen((char *)pattern);
+    if (m == 0)
+        return full_result(); // FIXME Throw exception?
+
+    TextPosition sp = 0, ep = 0;
+    // Search all occurrences
+    Search(pattern, m, &sp, &ep);
+
+    full_result result;
+    result.reserve(ep-sp+1); // Try to avoid reallocation.
+
+    ulong sampled_rank_i = 0;
+    // Report each occurrence
+    for (; sp <= ep; ++sp)
+    {
+        TextPosition i = sp;
+        TextPosition dist = 0;
+        uchar c = alphabetrank->access(i);
+        while (c != '\0' && !sampled->IsBitSet(i, &sampled_rank_i))
+        {
+            i = C[c]+alphabetrank->rank(c,i)-1;
+            c = alphabetrank->access(i);
+            ++ dist;
+        }
+        if (c == '\0')
+        {
+            // Rank among the end-markers in BWT
+            unsigned endmarkerRank = alphabetrank->rank(0, i) - 1;
+
+            // End-marker that we found belongs to the "preceding" doc in collection:
+            DocId docId = ((*endmarkerDocId)[endmarkerRank] + 1) % numberOfTexts;
+            // Map to doc ID:
+            docId = emptyTextRank->select0(docId+1);
+            if (docId >= begin && docId <= end)
+                result.push_back(make_pair(docId, dist));
+        }
+        else
+        {
+            TextPosition textPos = (*suffixes)[sampled_rank_i-1]+dist; //sampled->rank(i)-1] + dist;
+            DocId docId = (*suffixDocId)[sampled_rank_i-1]; //sampled->rank(i)-1];
+//            textPos = textPos - (*textStartPos)[docId]; // Offset inside the text
+
+            // Map to doc ID:
+            docId = emptyTextRank->select0(docId+1);
+            if (docId >= begin && docId <= end)
+                result.push_back(make_pair(docId, textPos));
+        }
+    }
+
+    return result;
+}
+
 /**
  * Save index to a file handle
@@ -881,6 +1330,38 @@ void CSA::Load(FILE *file, unsigned samplerate)
  * Rest of the functions follow...
  */
+
+// FIXME Use 2D-search structure
+unsigned CSA::CountEndmarkers(TextPosition sp, TextPosition ep, DocId begin, DocId end) const
+{
+    if (sp > ep)
+        return 0;
+
+    ulong ranksp = 0;
+    if (sp != 0)
+        ranksp = alphabetrank->rank(0, sp - 1);
+
+    unsigned resultSize = alphabetrank->rank(0, ep) - ranksp;
+    if (resultSize == 0)
+        return 0;
+
+    // Count end-markers in result interval and within [begin, end]
+    unsigned count = 0;
+    unsigned i = ranksp;
+    while (resultSize)
+    {
+        // End-marker we found belongs to the "previous" doc in the collection
+        DocId docId = ((*endmarkerDocId)[i] + 1) % numberOfTexts;
+        // Map to doc ID:
+        docId = emptyTextRank->select0(docId+1);
+        if (docId >= begin && docId <= end)
+            ++ count;
+
+        -- resultSize;
+        ++ i;
+    }
+    return count;
+}
 
 ulong CSA::Search(uchar const * pattern, TextPosition m, TextPosition *spResult, TextPosition *epResult) const
 {
@@ -904,6 +1385,56 @@ ulong CSA::Search(uchar const * pattern, TextPosition m, TextPosition *spResult,
     return 0;
 }
 
+ulong CSA::Search(uchar const * pattern, TextPosition m, TextPosition *spResult, TextPosition *epResult, DocId begin, DocId end) const
+{
+    // use the FM-search replacing function Occ(c,1,i) with alphabetrank->rank(c,i)
+    int c = (int)pattern[m-1];
+    assert(c == 0); // Start from endmarkers
+    TextPosition i=m-1;
+    TextPosition sp = begin;
+    TextPosition ep = end;
+    while (sp<=ep && i>=1)
+    {
+//        printf("i = %lu, c = %c, sp = %lu, ep = %lu\n", i, pattern[i], sp, ep);
+        c = (int)pattern[--i];
+        sp = C[c]+alphabetrank->rank(c,sp-1);
+        ep = C[c]+alphabetrank->rank(c,ep)-1;
+    }
+    *spResult = sp;
+    *epResult = ep;
+    if (sp<=ep)
+        return ep - sp + 1;
+    else
+        return 0;
+}
+
+
+ulong CSA::SearchLessThan(uchar const * pattern, TextPosition m, TextPosition *spResult, TextPosition *epResult) const
+{
+    // use the FM-search replacing function Occ(c,1,i) with alphabetrank->rank(c,i)
+    uint c = (int)pattern[m-1];
+    TextPosition i=m-1;
+    TextPosition sp = 1;
+    TextPosition ep = C[c+1]-1;
+    while (sp<=ep && i>=1)
+    {
+//        printf("i = %lu, c = %c, sp = %lu, ep = %lu\n", i, pattern[i], sp, ep);
+        c = (int)pattern[--i];
+        uint result = alphabetrank->rank(c,ep);
+        if (result == ~0u)
+            ep = 0;
+        else
+            ep = C[c]+result-1;
+    }
+    *spResult = sp;
+    *epResult = ep;
+    if (sp<=ep)
+        return ep - sp + 1;
+    else
+        return 0;
+}
+
+
 CSA::~CSA() {
 #ifdef CSA_TEST_BWT
     delete dynFMI;
@@ -942,13 +1473,21 @@ void CSA::makewavelet(uchar *bwt)
 //    alphabetrank = new THuffAlphabetRank(bwt,n, this->codetable,0);
 //    delete [] bwt;
     //alphabetrank = new RLWaveletTree(bwt, n); // Deletes bwt!
+//    std::cerr << "heap usage: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+//    std::cerr << "max heap usage before WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+
+    HeapProfiler::ResetMaxHeapConsumption(); // FIXME remove
     alphabet_mapper * am = new alphabet_mapper_none();
-    static_bitsequence_builder * bmb = new static_bitsequence_builder_brw32(16); // FIXME samplerate?
-    wt_coder * wtc = new wt_coder_huff(bwt,n,am);
+    static_bitsequence_builder * bmb = new static_bitsequence_builder_rrr02(8); // FIXME samplerate?
+//    static_bitsequence_builder * bmb = new static_bitsequence_builder_brw32(16); // FIXME samplerate?
+    wt_coder * wtc = new wt_coder_binary(bwt,n,am);
     alphabetrank = new static_sequence_wvtree(bwt,n,wtc,bmb,am);
     delete bmb;
-    delete [] bwt;
+    bwt = 0; // already deleted
+
+//    std::cerr << "heap usage: " << HeapProfiler::GetHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
+//    std::cerr << "max heap usage after WT: " << HeapProfiler::GetMaxHeapConsumption()/(1024*1024) << " Mbytes" << std::endl;
 }
 
 void CSA::maketables()
 {
@@ -957,17 +1496,24 @@ void CSA::maketables()
     // Note: bwtEndPos already set!
     // and the length of the first text (counter l):
 #ifdef CSA_TEST_BWT
-    ulong i = 0;
-    ulong l = 1;
-
-    uchar c = alphabetrank->access(i);
     while (c != '\0') {
-        i = C[c]+alphabetrank->rank(c, i)-1;
-        ++l;
-        c = alphabetrank->access(i);
+    ulong i = 0;
+    ulong l = 1;
+    uint alphabetrank_i_tmp = 0;
+    uchar c = alphabetrank->access(i, alphabetrank_i_tmp);
+    while (c != '\0')
+    {
+        i = C[c]+alphabetrank_i_tmp-1;
+        ++l;
+        c = alphabetrank->access(i, alphabetrank_i_tmp);
+    }
+
+    if (i != bwtEndPos) // compare result
+    {
+        std::cout << "i = " << i << ", bwtendpos = " << bwtEndPos << std::endl;
+        exit(0);
+    }
     }
-    assert(i == bwtEndPos); // compare result
 #endif
 
     // Build up arrays for text length and starting positions
@@ -997,6 +1543,7 @@ void CSA::maketables()
     DocId textId = numberOfTexts;
     ulong ulongmax = 0;
     ulongmax--;
+    uint alphabetrank_i_tmp =0;
 
     //positions:
     for (ulong i=n-1;i
access(p);
+        uchar c = alphabetrank->access(p, alphabetrank_i_tmp);
         if (c == '\0')
         {
             --textId;
             // Record the order of end-markers in BWT:
-            ulong endmarkerRank = alphabetrank->rank(0, p) - 1;
+            ulong endmarkerRank = alphabetrank_i_tmp - 1;
             (*endmarkerDocId)[endmarkerRank] = textId;
 
             // Store text length and text start position:
@@ -1031,7 +1578,7 @@
             p = textId; // Correct LF-mapping to the last char of the previous text.
         }
        else
-            p = C[c]+alphabetrank->rank(c, p)-1;
+            p = C[c]+alphabetrank_i_tmp-1;
     }
     assert(textId == 0);
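Editor's note: the begin/end overloads of Search() added in this patch seed the usual FM-index backward search with the end-marker interval [begin, end] instead of the full BWT range, so every surviving position already belongs to a document in that range. The following is a minimal, self-contained C++ sketch of that recurrence (sp = C[c] + rank_c(sp-1), ep = C[c] + rank_c(ep) - 1). The plain-string BWT, the C array and the linear-scan rankOf() are illustrative stand-ins for the wavelet-tree rank used by the class; none of these names come from CSA.cpp itself.

    // Sketch only: naive O(n) rank over a plain BWT string; C[c] counts
    // symbols strictly smaller than c, as in the FM-index convention.
    #include <array>
    #include <cstddef>
    #include <string>
    #include <utility>

    static std::size_t rankOf(const std::string &bwt, unsigned char c, std::size_t i)
    {
        // Number of occurrences of c in bwt[0..i] (inclusive).
        std::size_t count = 0;
        for (std::size_t k = 0; k <= i && k < bwt.size(); ++k)
            if ((unsigned char)bwt[k] == c)
                ++count;
        return count;
    }

    // Backward search started from an arbitrary interval [sp, ep]; seeding it
    // with the end-marker interval of a document range mirrors the restricted
    // Search(pattern, m, &sp, &ep, begin, end) overload above.
    static std::pair<std::size_t, std::size_t>
    backwardSearch(const std::string &bwt, const std::array<std::size_t, 256> &C,
                   const std::string &pattern, std::size_t sp, std::size_t ep)
    {
        for (std::size_t k = pattern.size(); k-- > 0 && sp <= ep; )
        {
            unsigned char c = (unsigned char)pattern[k];
            std::size_t occBefore = (sp == 0) ? 0 : rankOf(bwt, c, sp - 1);
            std::size_t occUpTo   = rankOf(bwt, c, ep);
            if (occUpTo == 0)                       // c absent: empty interval
                return std::make_pair((std::size_t)1, (std::size_t)0);
            sp = C[c] + occBefore;                  // first occurrence mapped by LF
            ep = C[c] + occUpTo - 1;                // last occurrence mapped by LF
        }
        return std::make_pair(sp, ep);              // empty result when sp > ep
    }

With sp = 0 and ep = bwt.size() - 1 this is the ordinary unrestricted backward search; the patch simply narrows the starting interval to the end-markers of documents begin..end.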
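Editor's note: the reporting functions added here all end with the same mapping step, docId = ((*endmarkerDocId)[rank] + 1) % numberOfTexts followed by emptyTextRank->select0(docId + 1): per the patch's own comments, the end-marker found in the BWT belongs to the preceding document, and select0 re-inserts empty documents into the numbering. The fragment below restates that arithmetic over plain std::vector stand-ins with a naive select0; the names emptyTextFlags, endmarkerToDocId and select0 are hypothetical, not members of CSA.

    #include <cstddef>
    #include <vector>

    // Position of the k-th 0-bit (1-based k), or -1 if there is none.
    static long select0(const std::vector<bool> &bits, std::size_t k)
    {
        if (k == 0)
            return -1;
        for (std::size_t pos = 0; pos < bits.size(); ++pos)
            if (!bits[pos] && --k == 0)
                return (long)pos;
        return -1;
    }

    // emptyTextFlags[d] is true when document d is empty. As in the patch:
    // the end-marker with this rank belongs to the preceding document, hence
    // the "+1 mod numberOfTexts"; select0 then maps the id into the full
    // numbering that still contains the empty documents.
    static long endmarkerToDocId(const std::vector<int> &endmarkerDocId,
                                 std::size_t numberOfTexts,
                                 const std::vector<bool> &emptyTextFlags,
                                 std::size_t endmarkerRank)
    {
        std::size_t docId = (endmarkerDocId[endmarkerRank] + 1) % numberOfTexts;
        return select0(emptyTextFlags, docId + 1);
    }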