Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 41 additions & 0 deletions src/crawlee/_utils/http.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
"""HTTP utility functions for Crawlee."""

from __future__ import annotations

from datetime import datetime, timedelta, timezone


def parse_retry_after_header(value: str | None) -> timedelta | None:
    """Parse the Retry-After HTTP header value.

    The header can contain either a number of seconds or an HTTP-date.
    See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After

    Args:
        value: The raw Retry-After header value.

    Returns:
        A timedelta representing the delay, or None if the header is missing, unparsable,
        negative, or describes a moment that is not in the future.
    """
    if not value:
        return None

    # Try parsing as integer seconds first (e.g. "Retry-After: 120").
    try:
        seconds = int(value)
    except ValueError:
        pass
    else:
        # RFC 9110 requires a non-negative delay; reject invalid negative values
        # for consistency with the HTTP-date branch below.
        return timedelta(seconds=seconds) if seconds >= 0 else None

    # Try parsing as HTTP-date (e.g., "Wed, 21 Oct 2015 07:28:00 GMT").
    from email.utils import parsedate_to_datetime  # noqa: PLC0415

    try:
        retry_date = parsedate_to_datetime(value)
    except (ValueError, TypeError):
        return None

    # A naive parsed date is treated as UTC; dates in the past yield no delay.
    delay = retry_date - datetime.now(retry_date.tzinfo or timezone.utc)
    return delay if delay.total_seconds() > 0 else None
Original file line number Diff line number Diff line change
Expand Up @@ -279,7 +279,12 @@ async def _handle_status_code_response(
"""
status_code = context.http_response.status_code
if self._retry_on_blocked:
self._raise_for_session_blocked_status_code(context.session, status_code)
self._raise_for_session_blocked_status_code(
context.session,
status_code,
url=context.request.url,
retry_after_header=context.http_response.headers.get('retry-after'),
)
self._raise_for_error_status_code(status_code)
yield context

Expand Down
51 changes: 46 additions & 5 deletions src/crawlee/crawlers/_basic/_basic_crawler.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from asyncio import CancelledError
from collections.abc import AsyncGenerator, Awaitable, Callable, Iterable, Sequence
from contextlib import AsyncExitStack, suppress
from datetime import timedelta
from datetime import datetime, timedelta, timezone
from functools import partial
from io import StringIO
from pathlib import Path
Expand Down Expand Up @@ -46,6 +46,8 @@
from crawlee._utils.docs import docs_group
from crawlee._utils.file import atomic_write, export_csv_to_stream, export_json_to_stream
from crawlee._utils.recurring_task import RecurringTask
from crawlee._utils.http import parse_retry_after_header
from crawlee.request_loaders import ThrottlingRequestManager
from crawlee._utils.robots import RobotsTxtFile
from crawlee._utils.urls import convert_to_absolute_url, is_url_absolute
from crawlee._utils.wait import wait_for
Expand Down Expand Up @@ -485,6 +487,7 @@ async def persist_state_factory() -> KeyValueStore:
self._robots_txt_file_cache: LRUCache[str, RobotsTxtFile] = LRUCache(maxsize=1000)
self._robots_txt_lock = asyncio.Lock()
self._tld_extractor = TLDExtract(cache_dir=tempfile.TemporaryDirectory().name)
self._throttling_manager: ThrottlingRequestManager | None = None
self._snapshotter = Snapshotter.from_config(config)
self._autoscaled_pool = AutoscaledPool(
system_status=SystemStatus(self._snapshotter),
Expand Down Expand Up @@ -611,12 +614,18 @@ async def _get_proxy_info(self, request: Request, session: Session | None) -> Pr
)

async def get_request_manager(self) -> RequestManager:
    """Return the configured request manager. If none is configured, open and return the default request queue.

    The returned manager is wrapped with `ThrottlingRequestManager` to enforce
    per-domain delays from 429 responses and robots.txt crawl-delay directives.
    """
    if not self._request_manager:
        inner = await RequestQueue.open(
            storage_client=self._service_locator.get_storage_client(),
            configuration=self._service_locator.get_configuration(),
        )
        # Keep a direct reference so the 429 / robots.txt throttling hooks
        # can record per-domain delays on it later.
        self._throttling_manager = ThrottlingRequestManager(inner)
        self._request_manager = self._throttling_manager

    return self._request_manager

Expand Down Expand Up @@ -1442,6 +1451,10 @@ async def __run_task_function(self) -> None:

await self._mark_request_as_handled(request)

# Record successful request to reset rate limit backoff for this domain.
if self._throttling_manager:
self._throttling_manager.record_success(request.url)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd rather use isinstance so that an explicitly configured ThrottlingRequestManager also works.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fixed! Updated to check isinstance(manager, ThrottlingRequestManager) accurately.


if session and session.is_usable:
session.mark_good()

Expand Down Expand Up @@ -1542,22 +1555,41 @@ def _raise_for_error_status_code(self, status_code: int) -> None:
if is_status_code_server_error(status_code) and not is_ignored_status:
raise HttpStatusCodeError('Error status code returned', status_code)

def _raise_for_session_blocked_status_code(
    self,
    session: Session | None,
    status_code: int,
    *,
    url: str = '',
    retry_after_header: str | None = None,
) -> None:
    """Raise an exception if the given status code indicates the session is blocked.

    If the status code is 429 (Too Many Requests), the domain is recorded as
    rate-limited in the `ThrottlingRequestManager` for per-domain backoff.

    Args:
        session: The session used for the request. If None, no check is performed.
        status_code: The HTTP status code to check.
        url: The request URL, used for per-domain rate limit tracking.
        retry_after_header: The value of the Retry-After response header, if present.

    Raises:
        SessionError: If the status code indicates the session is blocked.
    """
    if status_code == 429 and url:
        # Honor the server's Retry-After hint (seconds or HTTP-date) when present;
        # parse_retry_after_header returns None for missing/unparsable values.
        retry_after = parse_retry_after_header(retry_after_header)
        if self._throttling_manager:
            self._throttling_manager.record_domain_delay(url, retry_after=retry_after)

    if session is not None and session.is_blocked_status_code(
        status_code=status_code,
        ignore_http_error_status_codes=self._ignore_http_error_status_codes,
    ):
        raise SessionError(f'Assuming the session is blocked based on HTTP status code {status_code}')

# NOTE: _parse_retry_after_header has been moved to crawlee._utils.http.parse_retry_after_header

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
# NOTE: _parse_retry_after_header has been moved to crawlee._utils.http.parse_retry_after_header

def _check_request_collision(self, request: Request, session: Session | None) -> None:
"""Raise an exception if a request cannot access required resources.

Expand All @@ -1582,7 +1614,16 @@ async def _is_allowed_based_on_robots_txt_file(self, url: str) -> bool:
if not self._respect_robots_txt_file:
return True
robots_txt_file = await self._get_robots_txt_file_for_url(url)
return not robots_txt_file or robots_txt_file.is_allowed(url)
if not robots_txt_file:
return True

# Wire robots.txt crawl-delay into ThrottlingRequestManager (#1396).
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
# Wire robots.txt crawl-delay into ThrottlingRequestManager (#1396).
# Wire robots.txt crawl-delay into ThrottlingRequestManager

if self._throttling_manager:
crawl_delay = robots_txt_file.get_crawl_delay()
if crawl_delay is not None:
self._throttling_manager.set_crawl_delay(url, crawl_delay)

return robots_txt_file.is_allowed(url)

async def _get_robots_txt_file_for_url(self, url: str) -> RobotsTxtFile | None:
"""Get the RobotsTxtFile for a given URL.
Expand Down
8 changes: 7 additions & 1 deletion src/crawlee/crawlers/_playwright/_playwright_crawler.py
Original file line number Diff line number Diff line change
Expand Up @@ -459,7 +459,13 @@ async def _handle_status_code_response(
"""
status_code = context.response.status
if self._retry_on_blocked:
self._raise_for_session_blocked_status_code(context.session, status_code)
retry_after_header = context.response.headers.get('retry-after')
self._raise_for_session_blocked_status_code(
context.session,
status_code,
url=context.request.url,
retry_after_header=retry_after_header,
)
self._raise_for_error_status_code(status_code)
yield context

Expand Down
10 changes: 9 additions & 1 deletion src/crawlee/request_loaders/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,13 @@
from ._request_manager import RequestManager
from ._request_manager_tandem import RequestManagerTandem
from ._sitemap_request_loader import SitemapRequestLoader
from ._throttling_request_manager import ThrottlingRequestManager

__all__ = ['RequestList', 'RequestLoader', 'RequestManager', 'RequestManagerTandem', 'SitemapRequestLoader']
__all__ = [
'RequestList',
'RequestLoader',
'RequestManager',
'RequestManagerTandem',
'SitemapRequestLoader',
'ThrottlingRequestManager',
]
Loading
Loading