GitHub Stars
The input is the URL of any GitHub repository; the output is the repository's name and its GitHub star count.
Rating: 0.0 (0)
Pricing: Pay per usage
Total users: 33
Monthly users: 8
Runs succeeded: >99%
Last modified: a year ago
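To run this Actor from your own code, you can call it through the Apify API client for Python. The snippet below is a minimal sketch, not part of the Actor's source: the API token placeholder and the Actor ID '<username>/github-stars' are assumptions you need to replace with your own values from the Store.

from apify_client import ApifyClient

# Create a client authenticated with your Apify API token (placeholder below).
client = ApifyClient('<YOUR_APIFY_TOKEN>')

# Start the Actor and wait for the run to finish.
# '<username>/github-stars' is a hypothetical Actor ID -- use the real one from the Store.
run = client.actor('<username>/github-stars').call(run_input={
    'start_urls': [{'url': 'https://github.com/apify/crawlee'}],
})

# Read the scraped items from the run's default dataset.
for item in client.dataset(run['defaultDatasetId']).iterate_items():
    print(item['repo_name'], item['stars_count'])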
.actor/Dockerfile
# First, specify the base Docker image.
# You can see the Docker images from Apify at https://hub.docker.com/r/apify/.
# You can also use any other image from Docker Hub.
FROM apify/actor-python-selenium:3.11

# Second, copy just requirements.txt into the Actor image,
# since it should be the only file that affects the dependency install in the next step,
# in order to speed up the build
COPY requirements.txt ./

# Install the packages specified in requirements.txt,
# print the installed Python version, pip version
# and all installed packages with their versions for debugging
RUN echo "Python version:" \
 && python --version \
 && echo "Pip version:" \
 && pip --version \
 && echo "Installing dependencies:" \
 && pip install -r requirements.txt \
 && echo "All installed Python packages:" \
 && pip freeze

# Next, copy the remaining files and directories with the source code.
# Since we do this after installing the dependencies, quick build will be really fast
# for most source file changes.
COPY . ./

# Use compileall to ensure the runnability of the Actor Python code.
RUN python3 -m compileall -q .

# Specify how to launch the source code of your Actor.
# By default, the "python3 -m src" command is run
CMD ["python3", "-m", "src"]
.actor/actor.json
{ "actorSpecification": 1, "name": "my-actor-1", "title": "Getting started with Python and Selenium", "description": "Scrapes titles of websites using Selenium.", "version": "0.0", "meta": { "templateId": "python-selenium" }, "input": "./input_schema.json", "dockerfile": "./Dockerfile", "storages": { "dataset": { "actorSpecification": 1, "title": "Name of repo and stars", "views": { "titles": { "title": "Name of repo and stars", "transformation": { "fields": [ "repo_name", "stars_count" ] }, "display": { "component": "table", "properties": { "repo_name": { "label": "Name", "format": "text" }, "star_count": { "label": "Stars", "format": "text" } } } } } } }}
.actor/input_schema.json
{ "title": "Python Selenium Scraper", "type": "object", "schemaVersion": 1, "properties": { "start_urls": { "title": "Start URLs", "type": "array", "description": "URLs to start with", "prefill": [ { "url": "https://github.com/apify/crawlee" } ], "editor": "requestListSources" }, "max_depth": { "title": "Maximum depth", "type": "integer", "description": "Depth to which to scrape to", "default": 1 } }, "required": ["start_urls"]}
src/__main__.py
1"""2This module serves as the entry point for executing the Apify Actor. It handles the configuration of logging3settings. The `main()` coroutine is then executed using `asyncio.run()`.4
5Feel free to modify this file to suit your specific needs.6"""7
8import asyncio9import logging10
11from apify.log import ActorLogFormatter12
13from .main import main14
15# Configure loggers16handler = logging.StreamHandler()17handler.setFormatter(ActorLogFormatter())18
19apify_client_logger = logging.getLogger('apify_client')20apify_client_logger.setLevel(logging.INFO)21apify_client_logger.addHandler(handler)22
23apify_logger = logging.getLogger('apify')24apify_logger.setLevel(logging.DEBUG)25apify_logger.addHandler(handler)26
27# Execute the Actor main coroutine28asyncio.run(main())
src/main.py
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from apify import Actor


async def main() -> None:
    async with Actor() as actor:
        repo_url = 'https://github.com/apify/crawlee'  # Example repository URL

        # Launch Chrome with flags suitable for running inside the Actor's Docker container.
        chrome_options = ChromeOptions()
        if actor.config.headless:
            chrome_options.add_argument('--headless')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        driver = webdriver.Chrome(options=chrome_options)

        try:
            driver.get(repo_url)

            # Wait for the repository name and the star counter to appear on the page.
            wait = WebDriverWait(driver, 10)
            repo_name_element = wait.until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, "strong a[href*='/apify/crawlee']")))
            repo_name = repo_name_element.text.strip() if repo_name_element else 'Repo name not found'

            stars_element = wait.until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, "a.Link--muted[href*='/stargazers'] strong")))
            stars_count = stars_element.text.strip() if stars_element else '0'

            actor.log.info(f'Repository: {repo_name}, Stars: {stars_count}')

            # Save the scraped data to the Actor's default dataset.
            await actor.push_data({
                'repo_url': repo_url,
                'repo_name': repo_name,
                'stars_count': stars_count,
            })

        except Exception:
            actor.log.exception(f'Cannot extract data from {repo_url}.')
        finally:
            driver.quit()
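Note that the code above hardcodes the repository URL instead of reading the start_urls defined in input_schema.json. Below is a minimal sketch (an assumption, not part of the template) of how the beginning of main() could read the Actor input with the SDK's get_input(); the variable names are illustrative only.

from apify import Actor

async def main() -> None:
    async with Actor() as actor:
        # Read the input passed to the Actor run; fall back to an empty dict when absent.
        actor_input = await actor.get_input() or {}
        start_urls = actor_input.get('start_urls', [])  # e.g. [{'url': 'https://github.com/apify/crawlee'}]
        max_depth = actor_input.get('max_depth', 1)     # defaults to 1, as declared in input_schema.json

        for start_url in start_urls:
            repo_url = start_url.get('url')
            # Scrape repo_url with Selenium as shown in src/main.py above.
            actor.log.info(f'Would scrape {repo_url} up to depth {max_depth}')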
.dockerignore
# configurations
.idea

# crawlee and apify storage folders
apify_storage
crawlee_storage
storage

# installed files
.venv

# git folder
.git
.editorconfig
root = true
[*]
indent_style = space
indent_size = 4
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
end_of_line = lf
.gitignore
# This file tells Git which files shouldn't be added to source control
.idea
.DS_Store

apify_storage
storage

.venv/
.env/
__pypackages__
dist/
build/
*.egg-info/
*.egg

__pycache__

.mypy_cache
.dmypy.json
dmypy.json
.pytest_cache
.ruff_cache

.scrapy
*.log
requirements.txt
# Feel free to add your Python dependencies below. For formatting guidelines, see:
# https://pip.pypa.io/en/latest/reference/requirements-file-format/

apify ~= 1.5.1
selenium ~= 4.14.0