
Profesia.sk Scraper
One-stop shop for all data on Profesia.sk. Extract job offers, lists of companies, positions, locations, and more. Job offers include salary, textual info, company, and more.

Rating: 0.0 (0)
Pricing: $25.00/month + usage
Monthly users: 1
Runs succeeded: >99%
Last modified: 2 years ago
You can access Profesia.sk Scraper programmatically from your own applications using the Apify API. To use the Apify API, you'll need an Apify account and your API token, which you can find under Integrations settings in Apify Console.
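For example, a minimal Python sketch of calling the synchronous run-sync-get-dataset-items endpoint might look like this. The endpoint and input fields are taken from the OpenAPI definition below; the token value is a placeholder you must replace with your own:

import requests

# Your Apify API token, from the Integrations settings in Apify Console.
API_TOKEN = "<YOUR_APIFY_TOKEN>"

# Run the Actor synchronously and receive its dataset items in the response.
resp = requests.post(
    "https://api.apify.com/v2/acts/jurooravec~profesia-sk-scraper/run-sync-get-dataset-items",
    params={"token": API_TOKEN},
    json={
        "datasetType": "jobOffers",    # scrape the job offers dataset
        "jobOfferFilterLastNDays": 7,  # only offers up to 7 days old
        "outputMaxEntries": 50,        # save at most 50 entries
    },
    timeout=300,
)
resp.raise_for_status()
for item in resp.json():
    print(item)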
{
  "openapi": "3.0.1",
  "info": {
    "version": "0.0",
    "x-build-id": "7grtoI4EQtsCWRNlh"
  },
  "servers": [
    {
      "url": "https://api.apify.com/v2"
    }
  ],
  "paths": {
    "/acts/jurooravec~profesia-sk-scraper/run-sync-get-dataset-items": {
      "post": {
        "operationId": "run-sync-get-dataset-items-jurooravec-profesia-sk-scraper",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor, waits for its completion, and returns Actor's dataset items in response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK"
          }
        }
      }
    },
    "/acts/jurooravec~profesia-sk-scraper/runs": {
      "post": {
        "operationId": "runs-sync-jurooravec-profesia-sk-scraper",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor and returns information about the initiated run in response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK",
            "content": {
              "application/json": {
                "schema": {
                  "$ref": "#/components/schemas/runsResponseSchema"
                }
              }
            }
          }
        }
      }
    },
    "/acts/jurooravec~profesia-sk-scraper/run-sync": {
      "post": {
        "operationId": "run-sync-jurooravec-profesia-sk-scraper",
        "x-openai-isConsequential": false,
        "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from Key-value store in response.",
        "tags": [
          "Run Actor"
        ],
        "requestBody": {
          "required": true,
          "content": {
            "application/json": {
              "schema": {
                "$ref": "#/components/schemas/inputSchema"
              }
            }
          }
        },
        "parameters": [
          {
            "name": "token",
            "in": "query",
            "required": true,
            "schema": {
              "type": "string"
            },
            "description": "Enter your Apify token here"
          }
        ],
        "responses": {
          "200": {
            "description": "OK"
          }
        }
      }
    }
  },
  "components": {
    "schemas": {
      "inputSchema": {
        "type": "object",
        "properties": {
          "datasetType": {
            "title": "Dataset type",
            "enum": [
              "jobOffers",
              "industries",
              "professions",
              "companies",
              "languages",
              "locations",
              "partners"
            ],
            "type": "string",
            "description": "Use this option if you want to scrape a whole dataset,\n not just specific URLs.<br/><br/>\n This option is ignored if <strong>Start URLs:</strong> are given",
            "default": "jobOffers"
          },
          "jobOfferDetailed": {
            "title": "Detailed",
            "type": "boolean",
            "description": "If checked, the scraper will obtain more detailed info\n for job offers by visiting the details page of each job offer.<br/><br/>\n If unchecked, only the data from the listing page is extracted.<br/><br/>\n For details, please refer to https://apify.com/jurooravec/profesia-sk-scraper#output",
            "default": true
          },
          "jobOfferFilterQuery": {
            "title": "Search keywords (full-text search)",
            "type": "string",
            "description": "Comma-separated list of keywords. If given, only entries\n matching the keywords will be retrieved (full-text search)"
          },
          "jobOfferFilterMinSalaryValue": {
            "title": "Min salary",
            "minimum": 1,
            "type": "integer",
            "description": "If set, only entries offering this much or more will be extracted"
          },
          "jobOfferFilterMinSalaryPeriod": {
            "title": "Min salary per hour/month",
            "enum": [
              "month",
              "hour"
            ],
            "type": "string",
            "description": "Choose whether the minimum salary is in per-hour or per-month format",
            "default": "month"
          },
          "jobOfferFilterEmploymentType": {
            "title": "Type of employment",
            "enum": [
              "fte",
              "pte",
              "selfemploy",
              "voluntary",
              "internship"
            ],
            "type": "string",
            "description": "If set, only entries with this employment filter will be extracted"
          },
          "jobOfferFilterRemoteWorkType": {
            "title": "Remote vs On-site",
            "enum": [
              "fullRemote",
              "partialRemote",
              "noRemote"
            ],
            "type": "string",
            "description": "If set, only entries with this type of remote work filter will be extracted"
          },
          "jobOfferFilterLastNDays": {
            "title": "Last N days",
            "minimum": 0,
            "type": "integer",
            "description": "If set, only entries up to this many days old will be extracted.\n E.g. 7 = max 1 week old, 31 = max 1 month old, ..."
          },
          "jobOfferCountOnly": {
            "title": "Count the matched job offers",
            "type": "boolean",
            "description": "If checked, no data is extracted. Instead, the count of matched\n job offers is printed in the log.",
            "default": false
          },
          "inputExtendUrl": {
            "title": "Extend Actor input from URL",
            "type": "string",
            "description": "Extend Actor input with a config from a URL.<br/>\n For example, you can store your actor input in source control, and import it here.<br/>\n In case of a conflict (if a field is defined both in Actor input and in imported input) the Actor input overwrites the imported fields.<br/>\n The URL is requested with GET method, and must point to a JSON file containing a single object (the config).<br/>\n If you need to send a POST request or to modify the response further, use `inputExtendFromFunction` instead."
          },
          "inputExtendFromFunction": {
            "title": "Extend Actor input from custom function",
            "type": "string",
            "description": "Extend Actor input with a config defined by a custom function.<br/>\n For example, you can store your actor input in source control, and import it here.<br/>\n In case of a conflict (if a field is defined both in Actor input and in imported input) the Actor input overwrites the imported fields.<br/>\n The function must return an object (the config)."
          },
          "startUrls": {
            "title": "Start URLs",
            "type": "array",
            "description": "List of URLs to scrape.",
            "items": {
              "type": "object",
              "required": [
                "url"
              ],
              "properties": {
                "url": {
                  "type": "string",
                  "title": "URL of a web page",
                  "format": "uri"
                }
              }
            }
          },
          "startUrlsFromDataset": {
            "title": "Start URLs from Dataset",
            "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-]*#.+$",
            "type": "string",
            "description": "Import URLs to scrape from an existing Dataset.<br/>\n The dataset and the field to import from should be written as `{datasetID}#{field}`.<br/>\n Example: `datasetid123#url` will take URLs from dataset `datasetid123` from field `url`."
          },
          "startUrlsFromFunction": {
            "title": "Start URLs from custom function",
            "type": "string",
            "description": "Import or generate URLs to scrape using a custom function.<br/>"
          },
          "proxy": {
            "title": "Proxy configuration",
            "type": "object",
            "description": "Select proxies to be used by your crawler."
          },
          "includePersonalData": {
            "title": "Include personal data",
            "type": "boolean",
            "description": "By default, fields that are potential personal data are censored. Toggle this option on to get the uncensored values.<br/>\n <strong>WARNING:</strong> Turn this on ONLY if you have consent or a legal basis for using the data, or at your own risk. <a href=\"https://gdpr.eu/eu-gdpr-personal-data/\">Learn more</a>",
            "default": false
          },
          "requestMaxEntries": {
            "title": "Limit the number of requests",
            "minimum": 0,
            "type": "integer",
            "description": "If set, only at most this many requests will be processed.<br/>\n The count is determined from the RequestQueue that's used for the Actor run.<br/>\n This means that if `requestMaxEntries` is set to 50, but the associated queue already handled 40 requests, then only 10 new requests will be handled."
          },
          "requestTransform": {
            "title": "Transform requests",
            "type": "string",
            "description": "Freely transform the request object using a custom function.<br/>\n If not set, the request will remain as is."
          },
          "requestTransformBefore": {
            "title": "Transform requests - Setup",
            "type": "string",
            "description": "Use this if you need to run one-time initialization code before `requestTransform`."
          },
          "requestTransformAfter": {
            "title": "Transform requests - Teardown",
            "type": "string",
            "description": "Use this if you need to run one-time teardown code after `requestTransform`."
          },
          "requestFilter": {
            "title": "Filter requests",
            "type": "string",
            "description": "Decide which requests should be processed by using a custom function.<br/>\n If not set, all requests will be included.<br/>\n This is done after `requestTransform`.<br/>"
          },
          "requestFilterBefore": {
            "title": "Filter requests - Setup",
            "type": "string",
            "description": "Use this if you need to run one-time initialization code before `requestFilter`.<br/>"
          },
          "requestFilterAfter": {
            "title": "Filter requests - Teardown",
            "type": "string",
            "description": "Use this if you need to run one-time teardown code after `requestFilter`.<br/>"
          },
          "requestQueueId": {
            "title": "RequestQueue ID",
            "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-]*$",
            "type": "string",
            "description": "By default, requests are stored in the default request queue.\n Set this option if you want to use a non-default queue.\n <a href=\"https://docs.apify.com/sdk/python/docs/concepts/storages#opening-named-and-unnamed-storages\">Learn more</a><br/>\n <strong>NOTE:</strong> RequestQueue name can only contain letters 'a' through 'z', the digits '0' through '9', and the hyphen ('-') but only in the middle of the string (e.g. 'my-value-1')"
          },
          "outputMaxEntries": {
            "title": "Limit the number of scraped entries",
            "minimum": 0,
            "type": "integer",
            "description": "If set, only at most this many entries will be scraped.<br/>\n The count is determined from the Dataset that's used for the Actor run.<br/>\n This means that if `outputMaxEntries` is set to 50, but the associated Dataset already has 40 items in it, then only 10 new entries will be saved."
          },
          "outputRenameFields": {
            "title": "Rename dataset fields",
            "type": "object",
            "description": "Rename fields (columns) of the output data.<br/>\n If not set, all fields will have their original names.<br/>\n This is done before `outputPickFields`.<br/>\n Keys can be nested, e.g. `\"someProp.value[0]\"`.\n Nested path is resolved using <a href=\"https://lodash.com/docs/4.17.15#get\">Lodash.get()</a>."
          },
          "outputPickFields": {
            "title": "Pick dataset fields",
            "type": "array",
            "description": "Select a subset of fields of an entry that will be pushed to the dataset.<br/>\n If not set, all fields on an entry will be pushed to the dataset.<br/>\n This is done after `outputRenameFields`.<br/>\n Keys can be nested, e.g. `\"someProp.value[0]\"`.\n Nested path is resolved using <a href=\"https://lodash.com/docs/4.17.15#get\">Lodash.get()</a>.",
            "items": {
              "type": "string"
            }
          },
          "outputTransform": {
            "title": "Transform entries",
            "type": "string",
            "description": "Freely transform the output data object using a custom function.<br/>\n If not set, the data will remain as is.<br/>\n This is done after `outputPickFields` and `outputRenameFields`.<br/>"
          },
          "outputTransformBefore": {
            "title": "Transform entries - Setup",
            "type": "string",
            "description": "Use this if you need to run one-time initialization code before `outputTransform`.<br/>"
          },
          "outputTransformAfter": {
            "title": "Transform entries - Teardown",
            "type": "string",
            "description": "Use this if you need to run one-time teardown code after `outputTransform`.<br/>"
          },
          "outputFilter": {
            "title": "Filter entries",
            "type": "string",
            "description": "Decide which scraped entries should be included in the output by using a custom function.<br/>\n If not set, all scraped entries will be included.<br/>\n This is done after `outputPickFields`, `outputRenameFields`, and `outputTransform`.<br/>"
          },
          "outputFilterBefore": {
            "title": "Filter entries - Setup",
            "type": "string",
            "description": "Use this if you need to run one-time initialization code before `outputFilter`.<br/>"
          },
          "outputFilterAfter": {
            "title": "Filter entries - Teardown",
            "type": "string",
            "description": "Use this if you need to run one-time teardown code after `outputFilter`.<br/>"
          },
          "outputDatasetId": {
            "title": "Dataset ID",
            "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-]*$",
            "type": "string",
            "description": "By default, data is written to the Default dataset.\n Set this option if you want to write data to a non-default dataset.\n <a href=\"https://docs.apify.com/sdk/python/docs/concepts/storages#opening-named-and-unnamed-storages\">Learn more</a><br/>\n <strong>NOTE:</strong> Dataset name can only contain letters 'a' through 'z', the digits '0' through '9', and the hyphen ('-') but only in the middle of the string (e.g. 'my-value-1')"
          },
          "outputCacheStoreId": {
            "title": "Cache ID",
            "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-]*$",
            "type": "string",
            "description": "Set this option if you want to cache scraped entries in <a href=\"https://docs.apify.com/sdk/js/docs/guides/result-storage#key-value-store\">Apify's Key-value store</a>.<br/>\n This is useful for example when you want to scrape only NEW entries. In such case, you can use the `outputFilter` option to define a custom function to filter out entries already found in the cache.\n <a href=\"https://docs.apify.com/sdk/python/docs/concepts/storages#working-with-key-value-stores\">Learn more</a><br/>\n <strong>NOTE:</strong> Cache name can only contain letters 'a' through 'z', the digits '0' through '9', and the hyphen ('-') but only in the middle of the string (e.g. 'my-value-1')"
          },
          "outputCachePrimaryKeys": {
            "title": "Cache primary keys",
            "type": "array",
            "description": "Specify fields that uniquely identify entries (primary keys), so entries can be compared against the cache.<br/>\n <strong>NOTE:</strong> If not set, the entries are hashed based on all fields",
            "items": {
              "type": "string"
            }
          },
          "outputCacheActionOnResult": {
            "title": "Cache action on result",
            "enum": [
              "add",
              "remove",
              "overwrite"
            ],
            "type": "string",
            "description": "Specify whether scraped results should be added to, removed from, or overwrite the cache.<br/>\n - <strong>add</strong> - Adds scraped results to the cache<br/>\n - <strong>remove</strong> - Removes scraped results from the cache<br/>\n - <strong>overwrite</strong> - First clears all entries from the cache, then adds scraped results to the cache<br/>\n <strong>NOTE:</strong> No action happens when this field is empty."
          },
          "maxRequestRetries": {
            "title": "maxRequestRetries",
            "minimum": 0,
            "type": "integer",
            "description": "Indicates how many times the request is retried if <a href=\"https://crawlee.dev/api/basic-crawler/interface/BasicCrawlerOptions#requestHandler\">BasicCrawlerOptions.requestHandler</a> fails."
          },
          "maxRequestsPerMinute": {
            "title": "maxRequestsPerMinute",
            "minimum": 1,
            "type": "integer",
            "description": "The maximum number of requests per minute the crawler should run. Any positive, non-zero integer can be passed."
          },
          "maxRequestsPerCrawl": {
            "title": "maxRequestsPerCrawl",
            "minimum": 1,
            "type": "integer",
            "description": "Maximum number of pages that the crawler will open. The crawl will stop when this limit is reached.\n <br/> <strong>NOTE:</strong> In cases of parallel crawling, the actual number of pages visited might be slightly higher than this value."
          },
          "minConcurrency": {
            "title": "minConcurrency",
            "minimum": 1,
            "type": "integer",
            "description": "Sets the minimum concurrency (parallelism) for the crawl.<br/>\n <strong>WARNING:</strong> If you set this value too high with respect to the available system memory and CPU, the crawler will run extremely slowly or crash. If unsure, it's better to keep the default value, and the concurrency will scale up automatically."
          },
          "maxConcurrency": {
            "title": "maxConcurrency",
            "minimum": 1,
            "type": "integer",
            "description": "Sets the maximum concurrency (parallelism) for the crawl."
          },
          "navigationTimeoutSecs": {
            "title": "navigationTimeoutSecs",
            "minimum": 0,
            "type": "integer",
            "description": "Timeout in which the HTTP request to the resource needs to finish, given in seconds."
          },
          "requestHandlerTimeoutSecs": {
            "title": "requestHandlerTimeoutSecs",
            "minimum": 0,
            "type": "integer",
            "description": "Timeout in which the function passed as <a href=\"https://crawlee.dev/api/basic-crawler/interface/BasicCrawlerOptions#requestHandler\">BasicCrawlerOptions.requestHandler</a> needs to finish, in seconds."
          },
          "keepAlive": {
            "title": "keepAlive",
            "type": "boolean",
            "description": "Allows you to keep the crawler alive even if the RequestQueue gets empty. With keepAlive: true the crawler will keep running, waiting for more requests to come."
          },
          "ignoreSslErrors": {
            "title": "ignoreSslErrors",
            "type": "boolean",
            "description": "If set to true, SSL certificate errors will be ignored."
          },
          "additionalMimeTypes": {
            "title": "additionalMimeTypes",
            "uniqueItems": true,
            "type": "array",
            "description": "An array of MIME types you want the crawler to load and process. By default, only text/html and application/xhtml+xml MIME types are supported.",
            "items": {
              "type": "string"
            }
          },
          "suggestResponseEncoding": {
            "title": "suggestResponseEncoding",
            "type": "string",
            "description": "By default this crawler will extract the correct encoding from the HTTP response headers. There are some websites which use invalid headers. Those are encoded using the UTF-8 encoding. If those sites actually use a different encoding, the response will be corrupted. You can use suggestResponseEncoding to fall back to a certain encoding, if you know that your target website uses it. To force a certain encoding, disregarding the response headers, use forceResponseEncoding."
          },
          "forceResponseEncoding": {
            "title": "forceResponseEncoding",
            "type": "string",
            "description": "By default this crawler will extract the correct encoding from the HTTP response headers. Use forceResponseEncoding to force a certain encoding, disregarding the response headers. To only provide a default for missing encodings, use suggestResponseEncoding."
          },
          "perfBatchSize": {
            "title": "Batch requests",
            "minimum": 0,
            "type": "integer",
            "description": "If set, multiple Requests will be handled by a single Actor instance.<br/>\n Example: If set to 20, then up to 20 requests will be handled in a single \"go\", after which the actor instance will reset.<br/>\n <a href=\"https://docs.apify.com/platform/actors/development/performance#batch-jobs-win-over-the-single-jobs\">See Apify documentation</a>."
          },
          "perfBatchWaitSecs": {
            "title": "Wait (in seconds) between processing requests in a single batch",
            "minimum": 0,
            "type": "integer",
            "description": "How long to wait between entries within a single batch.<br/>\n Increase this value if you're using batching and you're sending requests to the scraped website too fast.<br/>\n Example: If set to 1, then after each entry in a batch, wait 1 second before continuing."
          },
          "logLevel": {
            "title": "Log Level",
            "enum": [
              "off",
              "debug",
              "info",
              "warn",
              "error"
            ],
            "type": "string",
            "description": "Select how detailed the logging should be.",
            "default": "info"
          },
          "errorReportingDatasetId": {
            "title": "Error reporting dataset ID",
            "pattern": "^[a-zA-Z0-9][a-zA-Z0-9-]*$",
            "type": "string",
            "description": "Dataset ID to which errors should be captured.<br/>\n Default: `'REPORTING'`.<br/>\n <strong>NOTE:</strong> Dataset name can only contain letters 'a' through 'z', the digits '0' through '9', and the hyphen ('-') but only in the middle of the string (e.g. 'my-value-1')",
            "default": "REPORTING"
          },
          "errorSendToTelemetry": {
            "title": "Send errors to Sentry",
            "type": "boolean",
            "description": "Whether to report actor errors to telemetry such as <a href=\"https://sentry.io/\">Sentry</a>.<br/>\n This info is used by the author of this actor to identify broken integrations,\n and track down and fix issues.",
            "default": true
          },
          "metamorphActorId": {
            "title": "Metamorph actor ID - metamorph to another actor at the end",
            "type": "string",
            "description": "Use this option if you want to run another actor with the same dataset after this actor has finished (AKA metamorph into another actor). <a href=\"https://docs.apify.com/sdk/python/docs/concepts/interacting-with-other-actors#actormetamorph\">Learn more</a> <br/>\n New actor is identified by its ID, e.g. \"apify/web-scraper\"."
          },
          "metamorphActorBuild": {
            "title": "Metamorph actor build",
            "type": "string",
            "description": "Tag or number of the target actor build to metamorph into (e.g. 'beta' or '1.2.345')"
          },
          "metamorphActorInput": {
            "title": "Metamorph actor input",
            "type": "object",
            "description": "Input object passed to the follow-up (metamorph) actor. <a href=\"https://docs.apify.com/sdk/python/docs/concepts/interacting-with-other-actors#actormetamorph\">Learn more</a>"
          }
        }
      },
      "runsResponseSchema": {
        "type": "object",
        "properties": {
          "data": {
            "type": "object",
            "properties": {
              "id": {
                "type": "string"
              },
              "actId": {
                "type": "string"
              },
              "userId": {
                "type": "string"
              },
              "startedAt": {
                "type": "string",
                "format": "date-time",
                "example": "2025-01-08T00:00:00.000Z"
              },
              "finishedAt": {
                "type": "string",
                "format": "date-time",
                "example": "2025-01-08T00:00:00.000Z"
              },
              "status": {
                "type": "string",
                "example": "READY"
              },
              "meta": {
                "type": "object",
                "properties": {
                  "origin": {
                    "type": "string",
                    "example": "API"
                  },
                  "userAgent": {
                    "type": "string"
                  }
                }
              },
              "stats": {
                "type": "object",
                "properties": {
                  "inputBodyLen": {
                    "type": "integer",
                    "example": 2000
                  },
                  "rebootCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "restartCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "resurrectCount": {
                    "type": "integer",
                    "example": 0
                  },
                  "computeUnits": {
                    "type": "integer",
                    "example": 0
                  }
                }
              },
              "options": {
                "type": "object",
                "properties": {
                  "build": {
                    "type": "string",
                    "example": "latest"
                  },
                  "timeoutSecs": {
                    "type": "integer",
                    "example": 300
                  },
                  "memoryMbytes": {
                    "type": "integer",
                    "example": 1024
                  },
                  "diskMbytes": {
                    "type": "integer",
                    "example": 2048
                  }
                }
              },
              "buildId": {
                "type": "string"
              },
              "defaultKeyValueStoreId": {
                "type": "string"
              },
              "defaultDatasetId": {
                "type": "string"
              },
              "defaultRequestQueueId": {
                "type": "string"
              },
              "buildNumber": {
                "type": "string",
                "example": "1.0.0"
              },
              "containerUrl": {
                "type": "string"
              },
              "usage": {
                "type": "object",
                "properties": {
                  "ACTOR_COMPUTE_UNITS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_WRITES": {
                    "type": "integer",
                    "example": 1
                  },
                  "KEY_VALUE_STORE_LISTS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_INTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_EXTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_SERPS": {
                    "type": "integer",
                    "example": 0
                  }
                }
              },
              "usageTotalUsd": {
                "type": "number",
                "example": 0.00005
              },
              "usageUsd": {
                "type": "object",
                "properties": {
                  "ACTOR_COMPUTE_UNITS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATASET_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "KEY_VALUE_STORE_WRITES": {
                    "type": "number",
                    "example": 0.00005
                  },
                  "KEY_VALUE_STORE_LISTS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_READS": {
                    "type": "integer",
                    "example": 0
                  },
                  "REQUEST_QUEUE_WRITES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_INTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "DATA_TRANSFER_EXTERNAL_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
                    "type": "integer",
                    "example": 0
                  },
                  "PROXY_SERPS": {
                    "type": "integer",
                    "example": 0
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
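The runs endpoint above only starts a run and returns a run object shaped like runsResponseSchema, so getting the scraped data takes a second step. Below is a minimal Python sketch of that asynchronous flow; it assumes the general-purpose Apify endpoints for run status (GET /v2/actor-runs/{runId}) and dataset items (GET /v2/datasets/{datasetId}/items), which belong to the wider Apify API rather than the definition above:

import time
import requests

API = "https://api.apify.com/v2"
API_TOKEN = "<YOUR_APIFY_TOKEN>"

# Start a run asynchronously; the response body matches runsResponseSchema.
run = requests.post(
    f"{API}/acts/jurooravec~profesia-sk-scraper/runs",
    params={"token": API_TOKEN},
    json={"datasetType": "companies"},  # scrape the companies dataset
    timeout=60,
).json()["data"]

# Poll the run until it reaches a terminal status.
while run["status"] not in ("SUCCEEDED", "FAILED", "ABORTED", "TIMED-OUT"):
    time.sleep(10)
    run = requests.get(
        f"{API}/actor-runs/{run['id']}",
        params={"token": API_TOKEN},
        timeout=60,
    ).json()["data"]

# Fetch the scraped items from the run's default dataset.
items = requests.get(
    f"{API}/datasets/{run['defaultDatasetId']}/items",
    params={"token": API_TOKEN, "format": "json"},
    timeout=60,
).json()
print(f"Run {run['id']} ended with status {run['status']}: {len(items)} items")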
Profesia.sk Scraper OpenAPI definition
OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.
OpenAPI works well with AI agents and GPTs because it standardizes how these systems interact with APIs, enabling reliable integrations and efficient communication.
By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.
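As a concrete illustration of that machine-readability, the short Python sketch below walks the definition shown above and lists every operation a client or AI agent could call. It assumes the definition has been saved locally as profesia-sk-scraper-openapi.json (a hypothetical filename):

import json

# Hypothetical local copy of the OpenAPI definition shown above.
with open("profesia-sk-scraper-openapi.json") as f:
    spec = json.load(f)

base_url = spec["servers"][0]["url"]

# Enumerate every path/method pair the spec declares.
for path, methods in spec["paths"].items():
    for method, op in methods.items():
        print(f"{method.upper()} {base_url}{path}")
        print(f"  operationId: {op['operationId']}")
        print(f"  summary: {op['summary']}")

Run against the definition above, this prints the three Run Actor operations with their summaries.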
You can download the OpenAPI definitions for Profesia.sk Scraper from the options below.
If you’d like to learn more about how OpenAPI powers GPTs, read our blog post.
You can also check out our other API clients.
Pricing
Pricing model: Rental
To use this Actor, you have to pay a monthly rental fee to the developer. The rent is subtracted from your prepaid usage every month after the free trial period. You also pay for the Apify platform usage.
Free trial: 3 days
Price: $25.00/month