Caching SearXNG


SearXNG installs itself under /usr/local/searxng, with the main source code in the searxng-src directory (/usr/local/searxng/searxng-src).

Interesting files are...

  1. webapp.py in /usr/local/searxng/searxng-src/searx/webapp.py : def search()
  2. __init__.py in /usr/local/searxng/searxng-src/searx/search/__init__.py : class Search

A cache implementation would work as follows:

  • make a directory named cache in the searx folder
  • make a sub-folder in the cache directory for every possible leading character, for instance a to z and 0 to 9
  • name each cache file after the search term, so the filename is identical to the search term
  • when a search is performed, check whether that filename exists (see the sketch after this list)
  • if there is a match, read in the local file instead and skip the live search
  • send the keywords to cache maintainers so they can update the cache; they can then crawl the search engines and build a more comprehensive cache over time
  • the user updates their cache by downloading and appending a distributed database
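
A minimal sketch of that lookup, assuming the cache root below and a hypothetical helper named build_cache_path (the page number and category are folded into the filename, matching the webapp.py snippet further down):

import os

CACHE_ROOT = "/usr/local/searxng/searxng-src/searx/cache"

def build_cache_path(query, pageno, category):
    # hypothetical helper: the file name is the search term plus page number and category
    fname = query + str(pageno) + str(category)
    first_char = fname[:1].lower()
    if not first_char.isalnum():
        first_char = '#'  # non-alphanumeric terms share the '#' sub-folder
    return os.path.join(CACHE_ROOT, first_char, fname)

def cached_result(query, pageno, category):
    # return the cached JSON text if a matching file exists, otherwise None
    path = build_cache_path(query, pageno, category)
    if os.path.isfile(path):
        with open(path, "r") as cached:
            return cached.read()
    return None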

Benefits: Why do this?

Imagine a man in the middle who knows your search term before you do, performs the search ahead of time, and returns the result instantly when you press enter. The result is the same, only much faster. That is what a cache does: it speeds up the process. It also allows for a more comprehensive search. If searches could be performed across all the search engines, and that data compiled, optimized and stored on disk while waiting for the user to search the keyword, then when the term is searched again the result would be not only fast but comprehensive.

Moreover, it could turn SearXNG into a full search engine built from cached results. Secondly, offline searching becomes possible if the cache grows large enough.

SearXNG is a privacy-focused search engine, so it must be disclosed to the end user that caching, however anonymous, requires sharing keywords/search terms; that is how the cache is built. There should be an opt-out.

Proposed SearXNG options:

  • use cache
  • update the cache daily
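
Neither option exists in SearXNG today. A sketch of how they might be read at runtime through the existing searx.settings dict, assuming a new (hypothetical) cache section in settings.yml:

from searx import settings

# 'cache', 'use_cache' and 'update_daily' are proposed keys, not part of upstream settings.yml
cache_settings = settings.get('cache', {})
use_cache = cache_settings.get('use_cache', False)
update_daily = cache_settings.get('update_daily', True)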

Make the cache directories

sudo mkdir -p /usr/local/searxng/searxng-src/searx/cache

cd /usr/local/searxng/searxng-src/searx/cache

sudo mkdir -p \! \@ \# \$ \% \& \? a b c d e f g h i j k l m n o p q r s t u v w x y z 0 1 2 3 4 5 6 7 8 9

sudo chown -R root:searxng /usr/local/searxng/searxng-src/searx/cache

sudo chmod -R 777 /usr/local/searxng/searxng-src/searx/cache
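
Alternatively, the same layout can be created from Python, which avoids escaping the special characters in the shell (a sketch, assuming the cache root above; run it with sufficient privileges):

import os
import string

cache_root = "/usr/local/searxng/searxng-src/searx/cache"
# one sub-folder per leading character: a few special characters, a-z and 0-9
for name in list("!@#$%&?") + list(string.ascii_lowercase) + list(string.digits):
    os.makedirs(os.path.join(cache_root, name), exist_ok=True)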

File: webapp.py

The cache filename is the search term (keyword), so the logic is a simple "does the file exist" if/else: take the first character of the search term to determine the sub-directory, build the complete path, and check whether the file exists. If it does, return the cached JSON file; otherwise perform the search.

# build the cache file name from the query, page number and category
fname = request.form['q'] + str(search_query.pageno) + str(search_query.categories[0])
first_char = fname[:1].lower()
if not first_char.isalnum():
    first_char = '#'  # non-alphanumeric terms go into the '#' sub-folder

# relative "cache" path: assumes the working directory is the searx source folder
file_path = os.path.abspath(os.path.join("cache", first_char, fname))
if not os.path.exists(file_path):
    responsex = webutils.get_json_response(search_query, result_container)
    if len(responsex.strip()) > 1000:  # only cache reasonably large responses, not near-empty JSON
        with open(file_path, "w") as text_file:
            text_file.write(responsex)
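
The snippet above only writes the cache. The read side described earlier (return the stored JSON instead of searching) might look like the following, placed in search() before the live search is started; this is a sketch, not the actual webapp.py code, and it assumes flask's Response is available there:

from flask import Response  # flask is already a dependency of webapp.py

if os.path.isfile(file_path):
    # cache hit: return the stored JSON and skip the live search entirely
    with open(file_path, "r") as cached:
        return Response(cached.read(), mimetype='application/json')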

File: __init__.py

class Search:
    """Search information container"""

    __slots__ = "search_query", "result_container", "start_time", "actual_timeout"

    def search_multiple_requests(self, requests):
        # pylint: disable=protected-access
        search_id = str(uuid4())

        for engine_name, query, request_params in requests:
            _search = copy_current_request_context(PROCESSORS[engine_name].search)
            th = threading.Thread(  # pylint: disable=invalid-name
                target=_search,
                args=(query, request_params, self.result_container, self.start_time, self.actual_timeout),
                name=search_id,
            )
            th._timeout = False
            th._engine_name = engine_name
            th.start()

        for th in threading.enumerate():  # pylint: disable=invalid-name
            if th.name == search_id:
                remaining_time = max(0.0, self.actual_timeout - (default_timer() - self.start_time))
                th.join(remaining_time)
                if th.is_alive():
                    th._timeout = True
                    self.result_container.add_unresponsive_engine(th._engine_name, 'timeout')
                    PROCESSORS[th._engine_name].logger.error('engine timeout')

    def search_multiple_requests2(self, requests):
        # pylint: disable=protected-access
        search_id = str(uuid4())
        mock_result_container = ResultContainer()
        mock_results = [{'url': f'Mock Result {i}', 'content': ''} for i in range(1, 6)]
        threads = []

        for engine_name, _, _ in requests:
            th = threading.Thread(
                target=self.mock_search_function,
                args=(engine_name, mock_results, mock_result_container),
                name=search_id,
            )
            th._timeout = False
            th._engine_name = engine_name
            th.start()
            threads.append(th)

        remaining_time = None
        for th in threads:
            if th.name == search_id:
                if remaining_time is None:
                    remaining_time = self.actual_timeout - (default_timer() - self.start_time)
                th.join(remaining_time)
                if th.is_alive():
                    th._timeout = True
                    self.result_container.add_unresponsive_engine(th._engine_name, 'timeout')
                    PROCESSORS[th._engine_name].logger.error('engine timeout')

        # Wait for all threads to finish, even if some have timed out
        for th in threads:
            th.join()

        # Copy the mock results to the actual result_container
        self.result_container = mock_result_container

    def mock_search_function(self, engine_name, mock_results, result_container):
        # This is a mock search function
        time.sleep(0.1)  # Simulate some processing time
        result_container.extend(engine_name, mock_results)

    def search_standard(self):
        """
        Update self.result_container, self.actual_timeout
        """
        requests, self.actual_timeout = self._get_requests()

        # note: 'os' must be imported at the top of searx/search/__init__.py,
        # and for a cache hit to occur this lookup path should use the same
        # naming scheme as webapp.py (first-character sub-folder plus page
        # number and category appended to the search term)
        cache_dir = 'cache'
        query_file_path = os.path.join(cache_dir, self.search_query.query)

        # send all search-request
        if requests:
            # Check if the file exists in the cache directory
            if os.path.isfile(query_file_path):
                self.search_multiple_requests2(requests)
            else:
                self.search_multiple_requests(requests)

        # return results, suggestions, answers and infoboxes
        return True
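
For the "update the cache daily" option, one simple approach is to expire entries older than a day so the next identical search refreshes them. A sketch of such a maintenance job (the expiry policy and the script itself are assumptions, not part of SearXNG):

import os
import time

CACHE_ROOT = "/usr/local/searxng/searxng-src/searx/cache"
MAX_AGE = 24 * 60 * 60  # one day, in seconds

def expire_old_entries():
    # delete cached result files older than MAX_AGE; the next identical search
    # then falls through to the live engines and re-caches the fresh response
    now = time.time()
    for dirpath, _dirnames, filenames in os.walk(CACHE_ROOT):
        for filename in filenames:
            path = os.path.join(dirpath, filename)
            if now - os.path.getmtime(path) > MAX_AGE:
                os.remove(path)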

