my-jewels.com Scraper

Developed by Mark Carter

Maintained by Community

Scrape my-jewels.com and extract data on clothing accessories. Our my-jewels.com API lets you crawl product information and pricing. The scraped data can be downloaded as HTML, JSON, CSV, Excel, or XML.

Rating: 0.0 (0)
Pricing: Pay per usage
Total users: 4
Monthly users: 1
Runs succeeded: >99%
Last modified: 3 years ago
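As the description above notes, runs can also be started and their results fetched programmatically. The sketch below is only illustrative: it assumes the apify-client npm package (not part of this actor), a placeholder API token, and a placeholder actor ID.

import { ApifyClient } from 'apify-client';

// Placeholder token and actor ID - replace with your own values.
const client = new ApifyClient({ token: 'YOUR_APIFY_TOKEN' });

// Start the scraper and wait for the run to finish.
const run = await client.actor('USERNAME/my-jewelscom-scraper').call({
    maxRequestsPerCrawl: 20,
});

// Fetch the scraped items as JSON; CSV, Excel, XML, and HTML exports are
// available for the same dataset via the Apify Console or API.
const { items } = await client.dataset(run.defaultDatasetId).listItems();
console.log(items);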

INPUT_SCHEMA.json

{
    "title": "my-jewelscom-scraper",
    "description": "",
    "type": "object",
    "schemaVersion": 1,
    "properties": {
        "maxRequestsPerCrawl": {
            "title": "Max items",
            "description": "How many items to extract from my-jewels.com",
            "default": 20,
            "prefill": 20,
            "type": "integer",
            "editor": "number"
        },
        "extendOutputFunction": {
            "title": "Extend Output Function",
            "description": "Add or remove properties on the output object, or omit the output by returning null",
            "type": "string",
            "default": "async ({ data, item, product, images, fns, name, request, variants, context, customData, input, Apify }) => {\n return item;\n}",
            "prefill": "async ({ data, item, product, images, fns, name, request, variants, context, customData, input, Apify }) => {\n return item;\n}",
            "editor": "javascript",
            "sectionCaption": "Extend scraper functionality",
            "sectionDescription": "You can change the output of the items for your dataset here, or add additional behavior to the scraper."
        },
        "extendScraperFunction": {
            "title": "Extend Scraper Function",
            "description": "Advanced function that extends the default scraper functionality, allowing you to manually perform actions on the page",
            "type": "string",
            "default": "async ({ fns, customData, Apify, label }) => {\n \n}",
            "prefill": "async ({ fns, customData, Apify, label }) => {\n \n}",
            "editor": "javascript"
        },
        "customData": {
            "title": "Custom data",
            "description": "Any data that you want to have available inside the Extend Output/Scraper Function",
            "default": {},
            "prefill": {},
            "type": "object",
            "editor": "json"
        },
        "fetchHtml": {
            "title": "Fetch HTML",
            "description": "Fetching the HTML of the pages takes roughly twice as long, so only enable this if needed",
            "default": true,
            "editor": "checkbox",
            "type": "boolean"
        },
        "maxConcurrency": {
            "title": "Max concurrency",
            "description": "Maximum concurrency to use",
            "default": 20,
            "prefill": 20,
            "type": "integer",
            "editor": "number"
        },
        "maxRequestRetries": {
            "title": "Max request retries",
            "description": "Maximum number of times a failed request is retried",
            "default": 3,
            "prefill": 3,
            "type": "integer",
            "editor": "number"
        },
        "debugLog": {
            "title": "Debug Log",
            "description": "Enable more verbose logging to understand what's happening during the scraping",
            "type": "boolean",
            "default": false,
            "editor": "checkbox"
        }
    }
}
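As a rough illustration of how the extend functions and customData fit together, here is one possible Extend Output Function (passed as a string in the input above). The item's price field and the customData.source key are assumptions made for this example, not guaranteed properties of the scraped items.

// Sketch of an Extend Output Function.
async ({ item, customData }) => {
    // Returning null omits the item from the dataset entirely.
    if (!item.price) {
        return null;
    }

    // Add extra properties to every item; customData comes from the "Custom data" input.
    return {
        ...item,
        source: customData.source,
        scrapedAt: new Date().toISOString(),
    };
}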

main.js

import Apify from 'apify';

Apify.main(async () => {
    const input = await Apify.getInput();

    // Hand the run over to the generic Shopify scraper, pointed at my-jewels.com.
    await Apify.metamorph('pocesar/shopify-scraper', {
        ...input,
        startUrls: [{
            url: 'http://www.my-jewels.com',
        }],
    });
});
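The Apify.metamorph call replaces this actor's process with the pocesar/shopify-scraper actor while staying inside the same run, so the input options defined in the schema above are forwarded unchanged and the scraped items land in this run's default dataset.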

package.json

{
    "name": "my-jewelscom-scraper",
    "version": "0.0.1",
    "type": "module",
    "dependencies": {
        "apify": "^2.3.2"
    },
    "scripts": {
        "start": "node main.js"
    }
}

Dockerfile

# First, specify the base Docker image. You can read more about
# the available images at https://sdk.apify.com/docs/guides/docker-images
# You can also use any other image from Docker Hub.
FROM apify/actor-node:16
# Second, copy just package.json and package-lock.json since those are the only
# files that affect "npm install" in the next step, to speed up the build.
COPY package*.json ./
RUN npm --quiet set progress=false \
 && npm install --only=prod --no-optional \
 && echo "Installed NPM packages:" \
 && (npm list --only=prod --no-optional --all || true) \
 && echo "Node.js version:" \
 && node --version \
 && echo "NPM version:" \
 && npm --version
COPY . ./
ENV APIFY_DISABLE_OUTDATED_WARNING 1
ENV npm_config_loglevel=silent
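Note that no CMD is declared: the apify/actor-node base images start the actor with npm start by default, which is why package.json defines a start script pointing at main.js.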