- document.h
- #pragma once
- enum class DocumentStatus {
- ACTUAL,
- IRRELEVANT,
- BANNED,
- REMOVED,
- };
- struct Document {
- Document() = default;
- Document(int id, double relevance, int rating);
- int id = 0;
- double relevance = 0.0;
- int rating = 0;
- };
- ***************************************************************************************************************************************
- paginator.h
- #pragma once
- #include <iostream>
- #include <vector>
- template <typename Iterator>
- struct IteratorRange {
- Iterator begin;
- Iterator end;
- IteratorRange(Iterator begin, Iterator end) :begin(begin), end(end) {}
- };
- template <typename Iterator>
- class Paginator {
- public:
- Paginator(Iterator begin, Iterator end, int size)
- :page_size_(size) {
- Iterator temp = begin;
- for (; temp + size < end; temp += size) {
- pages_.push_back(IteratorRange(temp, temp + size));
- }
- if (temp < end) {
- pages_.push_back(IteratorRange(temp, end));
- }
- }
- auto begin() const {
- return pages_.begin();
- }
- auto end() const {
- return pages_.end();
- }
- int size() const {
- return page_size_;
- }
- private:
- int page_size_;
- std::vector<IteratorRange<Iterator>> pages_;
- };
- template <typename Container>
- auto Paginate(const Container& c, size_t page_size) {
- return Paginator(begin(c), end(c), page_size);
- }
- template<typename Iterator>
- std::ostream& operator<< (std::ostream& out, const IteratorRange<Iterator>& p) {
- for (auto i = p.begin; i < p.end; i++) {
- out << *i;
- }
- return out;
- }
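- // Usage sketch (illustration only, not part of the original paste): paginating search results.
- // It assumes a populated SearchServer named search_server and the operator<< for Document
- // declared in string_processing.h.
- //
- //   const auto results = search_server.FindTopDocuments("curly dog"s);
- //   for (const auto& page : Paginate(results, 2)) {
- //       std::cout << page << std::endl;  // prints up to two documents per page
- //   }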
- ***************************************************************************************************************************************
- read_input_functions.h
- #pragma once
- #include <iostream>
- #include <vector>
- #include <string>
- std::string ReadLine();
- int ReadLineWithNumber();
- std::vector<std::string> SplitIntoWords(const std::string&);
- ***************************************************************************************************************************************
- remove_duplicates.h
- #pragma once
- #include "search_server.h"
- void RemoveDuplicates(SearchServer& search_server);
- ***************************************************************************************************************************************
- request_queue.h
- #pragma once
- #include <iostream>
- #include <vector>
- #include <deque>
- #include "document.h"
- #include "search_server.h"
- class RequestQueue {
- public:
- explicit RequestQueue(const SearchServer& search_server);
- // Wrappers around all the search methods so that each result is recorded for our statistics
- template <typename DocumentPredicate>
- std::vector<Document> AddFindRequest(const std::string& raw_query, DocumentPredicate document_predicate) {
- const auto result = search_server_.FindTopDocuments(raw_query, document_predicate);
- AddRequest(result.size());
- return result;
- }
- std::vector<Document> AddFindRequest(const std::string& raw_query, DocumentStatus status);
- std::vector<Document> AddFindRequest(const std::string& raw_query);
- int GetNoResultRequests() const;
- private:
- struct QueryResult {
- uint64_t timestamp;
- int results;
- };
- std::deque<QueryResult> requests_;
- const SearchServer& search_server_;
- int no_results_requests_;
- uint64_t current_time_;
- const static int min_in_day_ = 1440;
- void AddRequest(int results_num);
- };
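- // Usage sketch (illustration only, not part of the original paste): the queue models one request
- // per minute and keeps statistics over the last min_in_day_ = 1440 requests. It assumes a
- // populated SearchServer named search_server.
- //
- //   RequestQueue request_queue(search_server);
- //   for (int i = 0; i < 1439; ++i) {
- //       request_queue.AddFindRequest("empty request"s);  // assume this query matches nothing
- //   }
- //   request_queue.AddFindRequest("curly dog"s);           // assume this query does match
- //   // Nothing has expired yet, so 1439 of the last 1440 requests returned no results:
- //   std::cout << request_queue.GetNoResultRequests() << std::endl;  // 1439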
- ***************************************************************************************************************************************
- search_server.h
- #pragma once
- #include <iostream>
- #include <algorithm>
- #include <map>
- #include <cmath>
- #include <vector>
- #include <set>
- #include <string>
- #include <tuple>
- #include <stdexcept>
- #include "document.h"
- #include "read_input_functions.h"
- #include "string_processing.h"
- const int MAX_RESULT_DOCUMENT_COUNT = 5;
- const double EPSILON = 1e-6;  // tolerance used when comparing document relevance
- class SearchServer {
- public:
- template <typename StringContainer>
- explicit SearchServer(const StringContainer& stop_words);
- explicit SearchServer(const string& stop_words_text);
- void AddDocument(int document_id, const string& document, DocumentStatus status,
- const vector<int>& ratings);
- template <typename DocumentPredicate>
- vector<Document> FindTopDocuments(const string& raw_query,
- DocumentPredicate document_predicate) const;
- vector<Document> FindTopDocuments(const string& raw_query, DocumentStatus status) const;
- vector<Document> FindTopDocuments(const string& raw_query) const;
- int GetDocumentCount() const;
- set<int>::const_iterator begin() const;
- set<int>::const_iterator end() const;
- tuple<vector<string>, DocumentStatus> MatchDocument(const string& raw_query,
- int document_id) const;
- const map<string, double>& GetWordFrequencies(int document_id) const;
- void RemoveDocument(int document_id);
- bool IsStopWord(const string& word) const;
- private:
- struct DocumentData {
- int rating;
- DocumentStatus status;
- };
- const set<string> stop_words_;
- map<string, map<int, double>> word_to_document_freqs_;
- map<int, DocumentData> documents_;
- set<int> document_ids_;
- map<int, map<string, double>> document_to_word_freqs_;  // document id -> word -> term frequency
- static bool IsValidWord(const string& word);
- vector<string> SplitIntoWordsNoStop(const string& text) const;
- static int ComputeAverageRating(const vector<int>& ratings);
- struct QueryWord {
- string data;
- bool is_minus;
- bool is_stop;
- };
- QueryWord ParseQueryWord(const string& text) const;
- struct Query {
- set<string> plus_words;
- set<string> minus_words;
- };
- Query ParseQuery(const string& text) const;
- // Existence required
- double ComputeWordInverseDocumentFreq(const string& word) const;
- template <typename DocumentPredicate>
- vector<Document> FindAllDocuments(const Query& query,
- DocumentPredicate document_predicate) const;
- };
- template <typename StringContainer>
- SearchServer::SearchServer(const StringContainer& stop_words)
- : stop_words_(MakeUniqueNonEmptyStrings(stop_words)) // Extract non-empty stop words
- {
- if (!all_of(stop_words_.begin(), stop_words_.end(), IsValidWord)) {
- throw invalid_argument("Some of stop words are invalid"s);
- }
- }
- template <typename DocumentPredicate>
- vector<Document> SearchServer::FindTopDocuments(const string& raw_query,
- DocumentPredicate document_predicate) const {
- const auto query = ParseQuery(raw_query);
- auto matched_documents = FindAllDocuments(query, document_predicate);
- sort(matched_documents.begin(), matched_documents.end(), [](const Document& lhs, const Document& rhs) {
- if (std::abs(lhs.relevance - rhs.relevance) < EPSILON) {
- return lhs.rating > rhs.rating;
- }
- else {
- return lhs.relevance > rhs.relevance;
- }
- });
- if (matched_documents.size() > MAX_RESULT_DOCUMENT_COUNT) {
- matched_documents.resize(MAX_RESULT_DOCUMENT_COUNT);
- }
- return matched_documents;
- }
- template <typename DocumentPredicate>
- vector<Document> SearchServer::FindAllDocuments(const Query& query,
- DocumentPredicate document_predicate) const {
- map<int, double> document_to_relevance;
- for (const string& word : query.plus_words) {
- if (word_to_document_freqs_.count(word) == 0) {
- continue;
- }
- const double inverse_document_freq = ComputeWordInverseDocumentFreq(word);
- for (const auto& [document_id, term_freq] : word_to_document_freqs_.at(word)) {
- const auto& document_data = documents_.at(document_id);
- if (document_predicate(document_id, document_data.status, document_data.rating)) {
- document_to_relevance[document_id] += term_freq * inverse_document_freq;
- }
- }
- }
- for (const string& word : query.minus_words) {
- if (word_to_document_freqs_.count(word) == 0) {
- continue;
- }
- for (const auto& [document_id, _] : word_to_document_freqs_.at(word)) {
- document_to_relevance.erase(document_id);
- }
- }
- vector<Document> matched_documents;
- for (const auto& [document_id, relevance] : document_to_relevance) {
- matched_documents.push_back(
- { document_id, relevance, documents_.at(document_id).rating });
- }
- return matched_documents;
- }
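- // Usage sketch (illustration only, not part of the original paste): the predicate overload lets
- // a caller filter results by id, status and rating. It assumes a populated SearchServer named
- // search_server.
- //
- //   const auto documents = search_server.FindTopDocuments(
- //       "fluffy cat"s,
- //       [](int document_id, DocumentStatus status, int rating) {
- //           return status == DocumentStatus::ACTUAL && rating > 0;
- //       });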
- ***************************************************************************************************************************************
- string_processing.h
- #pragma once
- #include <iostream>
- #include <set>
- #include <string>
- #include "document.h"
- using namespace std;
- template <typename StringContainer>
- std::set<std::string> MakeUniqueNonEmptyStrings(const StringContainer& strings) {
- std::set<std::string> non_empty_strings;
- for (const std::string& str : strings) {
- if (!str.empty()) {
- non_empty_strings.insert(str);
- }
- }
- return non_empty_strings;
- }
- std::ostream& operator << (std::ostream& out, const Document& search);
- ***************************************************************************************************************************************
- document.cpp
- #include "document.h"
- Document::Document(int id, double relevance, int rating)
- : id(id)
- , relevance(relevance)
- , rating(rating) {
- }
- ***************************************************************************************************************************************
- main.cpp
- #include "search_server.h"
- #include "request_queue.h"
- #include "paginator.h"
- #include "remove_duplicates.h"
- int main() {
- SearchServer search_server("and with"s);
- search_server.AddDocument(1, "funny pet and nasty rat"s, DocumentStatus::ACTUAL, { 7, 2, 7 });
- search_server.AddDocument(2, "funny pet with curly hair"s, DocumentStatus::ACTUAL, { 1, 2 });
- // duplicate of document 2, will be removed
- search_server.AddDocument(3, "funny pet with curly hair"s, DocumentStatus::ACTUAL, { 1, 2 });
- // differs only in stop words, considered a duplicate
- search_server.AddDocument(4, "funny pet and curly hair"s, DocumentStatus::ACTUAL, { 1, 2 });
- // same set of words, considered a duplicate of document 1
- search_server.AddDocument(5, "funny funny pet and nasty nasty rat"s, DocumentStatus::ACTUAL, { 1, 2 });
- // new words were added, not a duplicate
- search_server.AddDocument(6, "funny pet and not very nasty rat"s, DocumentStatus::ACTUAL, { 1, 2 });
- // same set of words as in id 6 despite the different order, considered a duplicate
- search_server.AddDocument(7, "very nasty rat and not very funny pet"s, DocumentStatus::ACTUAL, { 1, 2 });
- // not all the words are present, not a duplicate
- search_server.AddDocument(8, "pet with rat and rat and rat"s, DocumentStatus::ACTUAL, { 1, 2 });
- // words come from different documents, not a duplicate
- search_server.AddDocument(9, "nasty rat with curly hair"s, DocumentStatus::ACTUAL, { 1, 2 });
- cout << "Before duplicates removed: "s << search_server.GetDocumentCount() << endl;
- RemoveDuplicates(search_server);
- cout << "After duplicates removed: "s << search_server.GetDocumentCount() << endl;
- }
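- // Expected output, assuming the duplicates are exactly the ones noted in the comments above
- // (documents 3, 4, 5 and 7):
- //
- //   Before duplicates removed: 9
- //   Found duplicate document id 3
- //   Found duplicate document id 4
- //   Found duplicate document id 5
- //   Found duplicate document id 7
- //   After duplicates removed: 5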
- ***************************************************************************************************************************************
- read_input_functions.cpp
- #include "read_input_functions.h"
- std::string ReadLine() {
- std::string s;
- std::getline(std::cin, s);
- return s;
- }
- int ReadLineWithNumber() {
- int result;
- std::cin >> result;
- ReadLine();
- return result;
- }
- std::vector<std::string> SplitIntoWords(const std::string& text) {
- std::vector<std::string> words;
- std::string word;
- for (const char c : text) {
- if (c == ' ') {
- if (!word.empty()) {
- words.push_back(word);
- word.clear();
- }
- }
- else {
- word += c;
- }
- }
- if (!word.empty()) {
- words.push_back(word);
- }
- return words;
- }
- ***************************************************************************************************************************************
- remove_duplicates.cpp
- #include "remove_duplicates.h"
- void RemoveDuplicates(SearchServer& search_server) {
- std::set<int> id_remove;
- map<set<string>, int> unique_doc;
- for (const auto& document_id : search_server)
- {
- const map<string, double>& words_in_browse = search_server.GetWordFrequencies(document_id); // the document's word frequencies
- set<string> uniq_word;
- for (const auto& [word, stat] : words_in_browse)
- {
- uniq_word.insert(word);
- }
- if (unique_doc.count(uniq_word))
- {
- id_remove.insert(document_id);
- }
- else
- {
- unique_doc.insert({ uniq_word, document_id });
- }
- }
- for (const auto& id : id_remove)
- {
- cout << "Found duplicate document id " << id << endl;
- search_server.RemoveDocument(id);
- }
- }
- ***************************************************************************************************************************************
- request_queue.cpp
- #include "request_queue.h"
- RequestQueue::RequestQueue(const SearchServer& search_server)
- : search_server_(search_server)
- , no_results_requests_(0)
- , current_time_(0) {
- }
- vector<Document> RequestQueue::AddFindRequest(const string& raw_query) {
- const auto result = search_server_.FindTopDocuments(raw_query);
- AddRequest(result.size());
- return result;
- }
- vector<Document> RequestQueue::AddFindRequest(const string& raw_query, DocumentStatus status) {
- const auto result = search_server_.FindTopDocuments(raw_query, status);
- AddRequest(result.size());
- return result;
- }
- int RequestQueue::GetNoResultRequests() const {
- return no_results_requests_;
- }
- void RequestQueue::AddRequest(int results_num) {
- // each new request advances the clock by one minute
- ++current_time_;
- // drop the results of searches that are more than a day old
- while (!requests_.empty() && min_in_day_ <= current_time_ - requests_.front().timestamp) {
- if (0 == requests_.front().results) {
- --no_results_requests_;
- }
- requests_.pop_front();
- }
- // store the result of the new search
- requests_.push_back({ current_time_, results_num });
- if (0 == results_num) {
- ++no_results_requests_;
- }
- }
- ***************************************************************************************************************************************
- search_server.cpp
- #include "search_server.h"
- SearchServer::SearchServer(const string& stop_words_text)
- : SearchServer(
- SplitIntoWords(stop_words_text)) // Invoke delegating constructor from string container
- {
- }
- void SearchServer::AddDocument(int document_id, const string& document, DocumentStatus status,
- const vector<int>& ratings) {
- if ((document_id < 0) || (documents_.count(document_id) > 0)) {
- throw invalid_argument("Invalid document_id"s);
- }
- const auto words = SplitIntoWordsNoStop(document);
- const double inv_word_count = 1.0 / words.size();
- for (const string& word : words) {
- document_to_word_freqs_[document_id][word] += inv_word_count;
- word_to_document_freqs_[word][document_id] += inv_word_count;
- }
- documents_.emplace(document_id, DocumentData{ ComputeAverageRating(ratings), status });
- document_ids_.insert(document_id);
- }
- vector<Document> SearchServer::FindTopDocuments(const string& raw_query, DocumentStatus status) const {
- return FindTopDocuments(
- raw_query, [status](int document_id, DocumentStatus document_status, int rating) {
- return document_status == status;
- });
- }
- vector<Document> SearchServer::FindTopDocuments(const string& raw_query) const {
- return FindTopDocuments(raw_query, DocumentStatus::ACTUAL);
- }
- int SearchServer::GetDocumentCount() const {
- return documents_.size();
- }
- const map<string, double>& SearchServer::GetWordFrequencies(int document_id) const
- {
- // Returning a reference to a local map would dangle, so the empty fallback is static
- static const map<string, double> empty_map;
- if (document_to_word_freqs_.count(document_id))
- {
- return document_to_word_freqs_.at(document_id);
- }
- return empty_map;
- }
- void SearchServer::RemoveDocument(int document_id)
- {
- if (document_ids_.find(document_id) != document_ids_.end())
- {
- for (const auto& [word, freq] : document_to_word_freqs_[document_id])
- {
- auto erase_word = word_to_document_freqs_[word].find(document_id); // locate this document's entry for the word
- word_to_document_freqs_[word].erase(erase_word); // remove the word's frequency data for this document
- }
- }
- documents_.erase(document_id);
- document_ids_.erase(document_id);
- document_to_word_freqs_.erase(document_id);
- }
- set<int>::const_iterator SearchServer::begin() const
- {
- return document_ids_.begin();
- }
- set<int>::const_iterator SearchServer::end() const
- {
- return document_ids_.end();
- }
- tuple<vector<string>, DocumentStatus> SearchServer::MatchDocument(const string& raw_query,
- int document_id) const {
- const auto query = ParseQuery(raw_query);
- vector<string> matched_words;
- for (const string& word : query.plus_words) {
- if (word_to_document_freqs_.count(word) == 0) {
- continue;
- }
- if (word_to_document_freqs_.at(word).count(document_id)) {
- matched_words.push_back(word);
- }
- }
- for (const string& word : query.minus_words) {
- if (word_to_document_freqs_.count(word) == 0) {
- continue;
- }
- if (word_to_document_freqs_.at(word).count(document_id)) {
- matched_words.clear();
- break;
- }
- }
- return { matched_words, documents_.at(document_id).status };
- }
- bool SearchServer::IsStopWord(const string& word) const {
- return stop_words_.count(word) > 0;
- }
- bool SearchServer::IsValidWord(const string& word) {
- // A valid word must not contain special characters
- return none_of(word.begin(), word.end(), [](char c) {
- return c >= '\0' && c < ' ';
- });
- }
- vector<string> SearchServer::SplitIntoWordsNoStop(const string& text) const {
- vector<string> words;
- for (const string& word : SplitIntoWords(text)) {
- if (!IsValidWord(word)) {
- throw invalid_argument("Word "s + word + " is invalid"s);
- }
- if (!IsStopWord(word)) {
- words.push_back(word);
- }
- }
- return words;
- }
- int SearchServer::ComputeAverageRating(const vector<int>& ratings) {
- if (ratings.empty()) {
- return 0;
- }
- int rating_sum = 0;
- for (const int rating : ratings) {
- rating_sum += rating;
- }
- return rating_sum / static_cast<int>(ratings.size());
- }
- SearchServer::QueryWord SearchServer::ParseQueryWord(const string& text) const {
- if (text.empty()) {
- throw invalid_argument("Query word is empty"s);
- }
- string word = text;
- bool is_minus = false;
- if (word[0] == '-') {
- is_minus = true;
- word = word.substr(1);
- }
- if (word.empty() || word[0] == '-' || !IsValidWord(word)) {
- throw invalid_argument("Query word "s + text + " is invalid");
- }
- return { word, is_minus, IsStopWord(word) };
- }
- SearchServer::Query SearchServer::ParseQuery(const string& text) const {
- Query result;
- for (const string& word : SplitIntoWords(text)) {
- const auto query_word = ParseQueryWord(word);
- if (!query_word.is_stop) {
- if (query_word.is_minus) {
- result.minus_words.insert(query_word.data);
- }
- else {
- result.plus_words.insert(query_word.data);
- }
- }
- }
- return result;
- }
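- // IDF of a word = log(total documents / documents that contain the word). Illustrative numbers:
- // with 4 documents and a word present in 2 of them, IDF = log(4.0 / 2) ≈ 0.69; a word present
- // in every document gets log(1) = 0 and contributes nothing to relevance.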
- double SearchServer::ComputeWordInverseDocumentFreq(const std::string& word) const {
- return std::log(GetDocumentCount() * 1.0 / word_to_document_freqs_.at(word).size());
- }
- ***************************************************************************************************************************************
- string_processing.cpp
- #include "string_processing.h"
- std::ostream& operator << (std::ostream& out, const Document& search) {
- return out << "{ document_id = " << search.id << ", relevance = " << search.relevance << ", rating = " << search.rating << " }";
- }