Caching SearXNG

This revision is from 2024/06/24 19:08. You can Restore it.

SearXNG installs itself on /usr/local/searxng/searxng-src, with the main source code in searxng-src directory.

Interesting files are...

  1. webapp.py in /usr/local/searxng/searxng-src/searx/webapp.py : def search()
  2. __init__.py in /usr/local/searxng/searxng-src/searx/search/__init__.py : class Search

A cache implementation here...

  • making a directory in the searx folder named cache
  • make a sub-folder for every possible character in the cache directory, for instance a to z and 0 to 9

  • the cache files are named by, and identical to, the search term
  • check if the filename exists when a search is performed
  • if there is a match read in the local file instead and defer the search
  • send the keywords to cache maintainers so they can update the cache. They can then crawl the search engines and build a more comprehensive cache over time.
  • the user updates their cache, by downloading and appending a distributed database.

Benefits: Why do this?

Imagine a man in the middle that knows your search term before you and performs the search prior, and then returns the result instantly when you pressed enter. The result is the same, except it would be very much faster. That is what a cache does, it speeds up the process. It also allows for a more comprehensive search, if I could perform searches across all the search engines, compile, optimize and store that data on disk all the while awaiting the user to search the keyword, when the term was searched again, the result would be not only fast but comprehensive.

Moreover, it could turn SearXNG into a full search engine built from cached results; additionally, offline searching becomes possible if the cache grows big enough.

SearXNG is a privacy-focused search engine, so disclose to the end user that — however anonymous — caching requires sharing keywords/search terms, because that is how the shared cache is built. Provide an opt-out.

Proposed searXNG options:

  • use cache
  • update the cache daily

Make the cache directories

sudo mkdir -p /usr/local/searxng/searxng-src/searx/cache

sudo mkdir -p /usr/local/searxng/searxng-src/searx/cache/\! \@ \# \$ \% \& \? a b c d e f g h i j k l m n o p q r s t u v w x y z 0 1 2 3 4 5 6 7 8 9

sudo chown -R root:searxng /usr/local/searxng/searxng-src/searx/cache

sudo chmod -R 775 /usr/local/searxng/searxng-src/searx/cache   # 777 is unnecessarily permissive; group write (searxng, set by the chown above) is sufficient

File: webapp.py: def search(): Line 625: Inject Line 662

The cache filename is the search term (keyword), so this is a simple file-exists check. Take the first letter of the search term to determine which sub-directory to use, build the complete path, and test whether the file exists: if it does, return the JSON file; otherwise perform the search.

def search():
    """Search query in q and return results.

    Supported outputs: html, json, csv, rss.

    This excerpt also write-through-populates the on-disk cache: after a
    live search, the JSON response is persisted under
    ``cache/<first char>/<query><pageno><category>`` so later identical
    queries can be served from disk (see ``Search.search_standard``).
    """
    # pylint: disable=too-many-locals, too-many-return-statements, too-many-branches
    # pylint: disable=too-many-statements

    # output_format
    output_format = request.form.get('format', 'html')
    if output_format not in OUTPUT_FORMATS:
        output_format = 'html'

    if output_format not in settings['search']['formats']:
        flask.abort(403)

    # check if there is query (not None and not an empty string)
    if not request.form.get('q'):
        if output_format == 'html':
            return render(
                # fmt: off
                'index.html',
                selected_categories=get_selected_categories(request.preferences, request.form),
                # fmt: on
            )
        return index_error(output_format, 'No query'), 400

    # search
    search_query = None
    raw_text_query = None
    result_container = None
    try:
        search_query, raw_text_query, _, _, selected_locale = get_search_query_from_webapp(
            request.preferences, request.form
        )

        search = SearchWithPlugins(search_query, request.user_plugins, request)  # pylint: disable=redefined-outer-name

        result_container = search.search()

        ############ Start new code (cache write-through)
        # Lowercase the query so the write path matches the lowercase
        # lookup performed on the read side (Search.search_multiple_requests2).
        fname = request.form['q'].lower() + str(search_query.pageno) + str(search_query.categories[0])
        first_char = fname[:1]
        if not first_char.isalnum():
            # all non-alphanumeric first characters share the '#' bucket
            first_char = '#'
        # BUG FIX: use the sanitized first_char for the bucket directory.
        # Previously the raw first character was used here, so a query
        # starting with a special character pointed at a non-existent
        # directory and the open() below raised (caught as a 500 error).
        # NOTE(review): path is resolved relative to the CWD of the
        # searxng process — confirm it runs from the searx directory.
        file_path = os.path.abspath(os.path.join("cache", first_char, fname))
        if not os.path.exists(file_path):
            responsex = webutils.get_json_response(search_query, result_container)
            # only cache non-trivial responses (skip near-empty result sets)
            if len(responsex.strip()) > 1000:
                with open(file_path, "w", encoding="utf-8") as text_file:
                    text_file.write(responsex)
        ############# End new code
    except SearxParameterException as e:
        logger.exception('search error: SearxParameterException')
        return index_error(output_format, e.message), 400
    except Exception as e:  # pylint: disable=broad-except
        logger.exception(e, exc_info=True)
        return index_error(output_format, gettext('search error')), 500

File: __init__.py in search

class Search:
    """Search information container.

    Orchestrates one search: either fans the query out to the engine
    processors in worker threads, or — when a cached JSON response for
    the query exists under ``cache/`` — replays the cached results
    instead of hitting the engines.
    """

    __slots__ = "search_query", "result_container", "start_time", "actual_timeout"

    def _cache_file_path(self):
        """Return the relative path of the cache file for the current query.

        Layout: ``cache/<first char>/<query><pageno><first category>``,
        with the query lowercased and all non-alphanumeric first
        characters bucketed under ``'#'`` — the same scheme used by the
        write side in webapp.py and the directories created at install
        time.  Relative to the process CWD.
        """
        query = self.search_query.query.lower()
        first_char = query[:1]
        if not first_char.isalnum():
            first_char = '#'
        fname = query + str(self.search_query.pageno) + str(self.search_query.categories[0])
        return os.path.join('cache', first_char, fname)

    def search_multiple_requests(self, requests):
        """Run a live search: one worker thread per requested engine.

        Threads are tagged with a per-search uuid so they can be found in
        ``threading.enumerate()``; each is joined with whatever time
        remains of ``self.actual_timeout``.
        """
        # pylint: disable=protected-access
        search_id = str(uuid4())

        for engine_name, query, request_params in requests:
            _search = copy_current_request_context(PROCESSORS[engine_name].search)
            th = threading.Thread(  # pylint: disable=invalid-name
                target=_search,
                args=(query, request_params, self.result_container, self.start_time, self.actual_timeout),
                name=search_id,
            )
            th._timeout = False
            th._engine_name = engine_name
            th.start()

        for th in threading.enumerate():  # pylint: disable=invalid-name
            if th.name == search_id:
                # budget shrinks as earlier joins consume wall-clock time
                remaining_time = max(0.0, self.actual_timeout - (default_timer() - self.start_time))
                th.join(remaining_time)
                if th.is_alive():
                    th._timeout = True
                    self.result_container.add_unresponsive_engine(th._engine_name, 'timeout')
                    PROCESSORS[th._engine_name].logger.error('engine timeout')

    def search_multiple_requests2(self, requests):
        """Serve the search from the on-disk cache instead of the engines.

        Loads the cached JSON response for the current query and replays
        its ``'results'`` entry through mock worker threads, so the rest
        of the pipeline behaves as if the engines had answered.  Falls
        back to a live search if the cache entry is unreadable.
        """
        # pylint: disable=protected-access
        search_id = str(uuid4())
        mock_result_container = ResultContainer()

        try:
            with open(self._cache_file_path(), encoding='utf-8') as mock_data_file:
                mock_data = json.load(mock_data_file)
            mock_results = mock_data['results']  # extract 'results' from the JSON data
        except (OSError, ValueError, KeyError):
            # corrupt, truncated or vanished cache entry: do a live search
            # instead of raising out of the search pipeline
            self.search_multiple_requests(requests)
            return

        threads = []
        for engine_name, _, _ in requests:
            # NOTE(review): every engine thread replays the same cached
            # results; presumably ResultContainer merges duplicates — confirm.
            th = threading.Thread(
                target=self.mock_search_function,
                args=(engine_name, mock_results, mock_result_container),
                name=search_id,
            )
            th._timeout = False
            th._engine_name = engine_name
            th.start()
            threads.append(th)

        remaining_time = None
        for th in threads:
            if th.name == search_id:
                if remaining_time is None:
                    remaining_time = self.actual_timeout - (default_timer() - self.start_time)
                th.join(remaining_time)
                if th.is_alive():
                    th._timeout = True
                    self.result_container.add_unresponsive_engine(th._engine_name, 'timeout')
                    PROCESSORS[th._engine_name].logger.error('engine timeout')

        # wait for all threads to finish, even timed-out ones, before
        # swapping containers
        for th in threads:
            th.join()

        # expose the cached results through the regular container attribute
        self.result_container = mock_result_container

    def mock_search_function(self, engine_name, mock_results, result_container):
        """Replay cached results into *result_container* as *engine_name*.

        Stands in for a real engine processor; the short sleep simulates
        engine processing time.
        """
        time.sleep(0.1)
        # JSON stores 'publishedDate' as an ISO string; the result
        # pipeline expects a datetime object
        for result in mock_results:
            if 'publishedDate' in result and isinstance(result['publishedDate'], str):
                result['publishedDate'] = datetime.fromisoformat(result['publishedDate'])
        result_container.extend(engine_name, mock_results)

    def search_standard(self):
        """Update ``self.result_container`` and ``self.actual_timeout``.

        Serves the query from the on-disk cache when a cached response
        file exists; otherwise performs a live multi-engine search.
        """
        requests, self.actual_timeout = self._get_requests()

        # send all search-request
        if requests:
            # cache hit: replay the stored JSON; cache miss: live search
            if os.path.isfile(self._cache_file_path()):
                self.search_multiple_requests2(requests)
            else:
                self.search_multiple_requests(requests)

        # return results, suggestions, answers and infoboxes
        return True

  

📝 📜 ⏱️ ⬆️