Test TikTok Top 10 TikTok Videos of the Day:
Try for free
No credit card required
Go to Store
Test TikTok Top 10 TikTok Videos of the Day:
k9tod/my-actor-1
Try for free
No credit card required
I have never coded before and am trying to create a TikTok scraper that returns the most-viewed TikToks from the past 24 hours.
.actor/Dockerfile
1# Specify the base Docker image. You can read more about
2# the available images at https://docs.apify.com/sdk/js/docs/guides/docker-images
3# You can also use any other image from Docker Hub.
4FROM apify/actor-node:18
5
6# Copy just package.json and package-lock.json
7# to speed up the build using Docker layer cache.
8COPY package*.json ./
9
10# Install NPM packages, skip optional and development dependencies to
11# keep the image small. Avoid logging too much and print the dependency
12# tree for debugging
13RUN npm --quiet set progress=false \
14 && npm install --omit=dev --omit=optional \
15 && echo "Installed NPM packages:" \
16 && (npm list --omit=dev --all || true) \
17 && echo "Node.js version:" \
18 && node --version \
19 && echo "NPM version:" \
20 && npm --version \
21 && rm -r ~/.npm
22
23# Next, copy the remaining files and directories with the source code.
24# Since we do this after NPM install, quick build will be really fast
25# for most source file changes.
26COPY . ./
27
28
29# Run the image.
# NOTE(review): shell-form CMD runs under /bin/sh, so SIGTERM may not reach the
# Node process directly — consider exec form (CMD ["npm", "start", "--silent"])
# if graceful shutdown matters.
30CMD npm start --silent
.actor/actor.json
1{
2 "actorSpecification": 1,
3 "name": "my-actor-1",
4 "title": "Scrape single page in JavaScript",
5 "description": "Scrape data from single page with provided URL.",
6 "version": "0.0",
7 "meta": {
8 "templateId": "js-start"
9 },
10 "input": "./input_schema.json",
11 "dockerfile": "./Dockerfile"
12}
.actor/input_schema.json
1{
2 "title": "Scrape data from a web page",
3 "type": "object",
4 "schemaVersion": 1,
5 "properties": {
6 "url": {
7 "title": "URL of the page",
8 "type": "string",
9 "description": "The URL of website you want to get the data from.",
10 "editor": "textfield",
11 "prefill": "https://www.apify.com/"
12 }
13 },
14 "required": ["url"]
15}
src/SCRIPT TIKTOK
const axios = require('axios');
const cheerio = require('cheerio');

// Default number of videos to collect per run.
const TOP_VIDEO_LIMIT = 10;

/**
 * Scrape the top videos listed on the (hypothetical) TikTok top-videos page.
 *
 * Fetches the page over HTTP, parses it with Cheerio, and extracts up to
 * `limit` videos as `{ title, url, timestamp }` objects.
 *
 * @param {number} [limit=10] - Maximum number of videos to return.
 * @returns {Promise<Array<{title: string, url: string|undefined, timestamp: string}>>}
 *   The scraped videos, or an empty array if the request or parsing fails.
 */
async function scrapeTopTikTokVideos(limit = TOP_VIDEO_LIMIT) {
  try {
    const response = await axios.get('https://hypothetical-tiktok-website.com/top-videos');
    const $ = cheerio.load(response.data);

    // BUG FIX: the original `const topVideos = [10]` created an array
    // pre-seeded with the number 10 (not an empty 10-slot array), which both
    // polluted the results and made the length check trip one video early.
    const topVideos = [];

    // Replace the selectors below with the appropriate ones for the hypothetical website's video elements
    $('div.video-item').each((index, element) => {
      const title = $(element).find('h2.title').text().trim();
      const url = $(element).find('a.video-link').attr('href');
      const timestamp = $(element).find('span.timestamp').text().trim();

      topVideos.push({
        title,
        url,
        timestamp,
      });

      // Stop once the limit is reached; returning false exits the .each() loop.
      // `>=` (rather than `===`) is robust even if the list is mutated elsewhere.
      if (topVideos.length >= limit) {
        return false;
      }
    });

    return topVideos;
  } catch (error) {
    // Best-effort scraper: log and return an empty result rather than crash.
    console.error('Error occurred during scraping:', error.message);
    return [];
  }
}

// Call the function to scrape the top 10 TikTok videos
scrapeTopTikTokVideos()
  .then((topVideos) => {
    console.log('Top 10 TikTok Videos of the Day:');
    topVideos.forEach((video, index) => {
      console.log(`${index + 1}. ${video.title}`);
      console.log(`   Posted At: ${video.timestamp}`);
      console.log(`   URL: ${video.url}`);
      console.log('-------------------------');
    });
  })
  .catch((error) => {
    console.error('Error occurred:', error.message);
  });
src/main.js
// Axios - Promise based HTTP client for the browser and node.js (Read more at https://axios-http.com/docs/intro).
import axios from "axios";
// Cheerio - The fast, flexible & elegant library for parsing and manipulating HTML and XML (Read more at https://cheerio.js.org/).
import * as cheerio from "cheerio";
// Apify SDK - toolkit for building Apify Actors (Read more at https://docs.apify.com/sdk/js/).
import { Actor } from "apify";

// Initialize the Actor for its environment before doing any work.
await Actor.init();

// The input's shape is declared in input_schema.json; `url` is required there.
const { url } = await Actor.getInput();

// Download the page and hand the HTML straight to Cheerio for parsing.
const { data: html } = await axios.get(url);
const $ = cheerio.load(html);

// Collect every heading (h1-h6) on the page as { level, text } records.
const headings = [];
$("h1, h2, h3, h4, h5, h6").each((i, el) => {
  const heading = {
    level: $(el).prop("tagName").toLowerCase(),
    text: $(el).text(),
  };
  console.log("Extracted heading", heading);
  headings.push(heading);
});

// Persist the extracted headings to the default Dataset (table-like storage).
await Actor.pushData(headings);

// Exit gracefully so the platform records a clean finish.
await Actor.exit();
.dockerignore
1# configurations
2.idea
3
4# crawlee and apify storage folders
5apify_storage
6crawlee_storage
7storage
8
9# installed files
10node_modules
11
12# git folder
13.git
.gitignore
1# This file tells Git which files shouldn't be added to source control
2.DS_Store
3.idea
4dist
5node_modules
6apify_storage
7storage/*
8!storage/key_value_stores
9storage/key_value_stores/*
10!storage/key_value_stores/default
11storage/key_value_stores/default/*
12!storage/key_value_stores/default/INPUT.json
package.json
1{
2 "name": "js-scrape-single-page",
3 "version": "0.0.1",
4 "type": "module",
5 "description": "This is an example of an Apify actor.",
6 "engines": {
7 "node": ">=18.0.0"
8 },
9 "dependencies": {
10 "apify": "^3.0.0",
11 "axios": "^1.4.0",
12 "cheerio": "^1.0.0-rc.12"
13 },
14 "scripts": {
15 "start": "node ./src/main.js",
16 "test": "echo \"Error: oops, the actor has no tests yet, sad!\" && exit 1"
17 },
18 "author": "It's not you it's me",
19 "license": "ISC"
20}
Developer
Maintained by Community
Actor Metrics
8 monthly users
-
2 stars
>99% runs succeeded
Created in Jul 2023
Modified a year ago
Categories