Test TikTok Top 10 TikTok Videos of the Day:
Pricing
Pay per usage
Go to Store
Test TikTok Top 10 TikTok Videos of the Day:
I've never written code before; I'm trying to create a TikTok scraper that returns the TikToks from the past 24 hours that have the most views.
0.0 (0)
Pricing
Pay per usage
2
Total users
94
Monthly users
1
Runs succeeded
>99%
Last modified
2 years ago
.actor/Dockerfile
# Specify the base Docker image. You can read more about
# the available images at https://docs.apify.com/sdk/js/docs/guides/docker-images
# You can also use any other image from Docker Hub.
FROM apify/actor-node:18

# Copy just package.json and package-lock.json
# to speed up the build using Docker layer cache.
COPY package*.json ./

# Install NPM packages, skip optional and development dependencies to
# keep the image small. Avoid logging too much and print the dependency
# tree for debugging
RUN npm --quiet set progress=false \
    && npm install --omit=dev --omit=optional \
    && echo "Installed NPM packages:" \
    && (npm list --omit=dev --all || true) \
    && echo "Node.js version:" \
    && node --version \
    && echo "NPM version:" \
    && npm --version \
    && rm -r ~/.npm

# Next, copy the remaining files and directories with the source code.
# Since we do this after NPM install, quick build will be really fast
# for most source file changes.
COPY . ./

# Run the image.
CMD npm start --silent
.actor/actor.json
{
    "actorSpecification": 1,
    "name": "my-actor-1",
    "title": "Scrape single page in JavaScript",
    "description": "Scrape data from single page with provided URL.",
    "version": "0.0",
    "meta": {
        "templateId": "js-start"
    },
    "input": "./input_schema.json",
    "dockerfile": "./Dockerfile"
}
.actor/input_schema.json
{
    "title": "Scrape data from a web page",
    "type": "object",
    "schemaVersion": 1,
    "properties": {
        "url": {
            "title": "URL of the page",
            "type": "string",
            "description": "The URL of website you want to get the data from.",
            "editor": "textfield",
            "prefill": "https://www.apify.com/"
        }
    },
    "required": ["url"]
}
src/SCRIPT TIKTOK
const axios = require('axios');const cheerio = require('cheerio');
/**
 * Scrape the top 10 TikTok videos from a (hypothetical) listing page.
 *
 * Fetches the page, parses it with Cheerio, and collects up to 10
 * { title, url, timestamp } records from `div.video-item` elements.
 *
 * @returns {Promise<Array<{title: string, url: string, timestamp: string}>>}
 *   Up to 10 video records; an empty array if the request or parsing fails.
 */
async function scrapeTopTikTokVideos() {
    try {
        const response = await axios.get('https://hypothetical-tiktok-website.com/top-videos');
        const $ = cheerio.load(response.data);

        // BUG FIX: the original initialized this as `[10]`, i.e. an array whose
        // first element is the number 10 — so the result started with a bogus
        // entry and could hold 11 items. Start from an empty array instead.
        const topVideos = [];

        // Replace the selectors below with the appropriate ones for the
        // hypothetical website's video elements.
        $('div.video-item').each((index, element) => {
            const title = $(element).find('h2.title').text().trim();
            const url = $(element).find('a.video-link').attr('href');
            const timestamp = $(element).find('span.timestamp').text().trim();

            topVideos.push({ title, url, timestamp });

            // Limit to the top 10 videos (>= is a safer guard than ===).
            if (topVideos.length >= 10) {
                return false; // Returning false exits the Cheerio .each() loop.
            }
        });

        return topVideos;
    } catch (error) {
        console.error('Error occurred during scraping:', error.message);
        return [];
    }
}
// Call the function to scrape the top 10 TikTok videos and print a
// numbered report; any rejection is reported via console.error.
(async () => {
    try {
        const topVideos = await scrapeTopTikTokVideos();
        console.log('Top 10 TikTok Videos of the Day:');
        topVideos.forEach((video, index) => {
            console.log(`${index + 1}. ${video.title}`);
            console.log(` Posted At: ${video.timestamp}`);
            console.log(` URL: ${video.url}`);
            console.log('-------------------------');
        });
    } catch (error) {
        console.error('Error occurred:', error.message);
    }
})();
src/main.js
// Axios - Promise based HTTP client for the browser and node.js (Read more at https://axios-http.com/docs/intro).
import axios from "axios";
// Cheerio - The fast, flexible & elegant library for parsing and manipulating HTML and XML (Read more at https://cheerio.js.org/).
import * as cheerio from "cheerio";
// Apify SDK - toolkit for building Apify Actors (Read more at https://docs.apify.com/sdk/js/).
import { Actor } from "apify";

// The init() call configures the Actor for its environment. It's recommended to start every Actor with an init().
await Actor.init();

// Structure of input is defined in input_schema.json
const input = await Actor.getInput();
// Fail fast with a clear message instead of a TypeError when the Actor
// is run without input (getInput() returns null in that case).
if (!input?.url) {
    throw new Error('Missing required input field "url" (see input_schema.json).');
}
const { url } = input;

// Fetch the HTML content of the page.
const response = await axios.get(url);

// Parse the downloaded HTML with Cheerio to enable data extraction.
const $ = cheerio.load(response.data);

// Extract all headings from the page (tag name and text).
const headings = [];
$("h1, h2, h3, h4, h5, h6").each((i, element) => {
    const headingObject = {
        level: $(element).prop("tagName").toLowerCase(),
        text: $(element).text(),
    };
    console.log("Extracted heading", headingObject);
    headings.push(headingObject);
});

// Save headings to Dataset - a table-like storage.
await Actor.pushData(headings);

// Gracefully exit the Actor process. It's recommended to quit all Actors with an exit().
await Actor.exit();
.dockerignore
# configurations
.idea

# crawlee and apify storage folders
apify_storage
crawlee_storage
storage

# installed files
node_modules

# git folder
.git
.gitignore
# This file tells Git which files shouldn't be added to source control.
.DS_Store
.idea
dist
node_modules
apify_storage
storage/*
!storage/key_value_stores
storage/key_value_stores/*
!storage/key_value_stores/default
storage/key_value_stores/default/*
!storage/key_value_stores/default/INPUT.json
package.json
{
    "name": "js-scrape-single-page",
    "version": "0.0.1",
    "type": "module",
    "description": "This is an example of an Apify actor.",
    "engines": {
        "node": ">=18.0.0"
    },
    "dependencies": {
        "apify": "^3.0.0",
        "axios": "^1.4.0",
        "cheerio": "^1.0.0-rc.12"
    },
    "scripts": {
        "start": "node ./src/main.js",
        "test": "echo \"Error: oops, the actor has no tests yet, sad!\" && exit 1"
    },
    "author": "It's not you it's me",
    "license": "ISC"
}