![WCC Pinecone Integration avatar](https://images.apifyusercontent.com/BC0NxNb30MvyVlksDTwVfagocs0lYBcYp7K58gUF3Fs/rs:fill:250:250/cb:1/aHR0cHM6Ly9hcGlmeS1pbWFnZS11cGxvYWRzLXByb2QuczMudXMtZWFzdC0xLmFtYXpvbmF3cy5jb20vZjNmMEo5ekNhTUEwa2VtZHgvd1YwUnR5dG9vVzlGb2JoRnYtV0NDX1BpbmVjb25lX0ludGVncmF0aW9uLnBuZw.webp)
WCC Pinecone Integration
No credit card required
![WCC Pinecone Integration](https://images.apifyusercontent.com/BC0NxNb30MvyVlksDTwVfagocs0lYBcYp7K58gUF3Fs/rs:fill:250:250/cb:1/aHR0cHM6Ly9hcGlmeS1pbWFnZS11cGxvYWRzLXByb2QuczMudXMtZWFzdC0xLmFtYXpvbmF3cy5jb20vZjNmMEo5ekNhTUEwa2VtZHgvd1YwUnR5dG9vVzlGb2JoRnYtV0NDX1BpbmVjb25lX0ludGVncmF0aW9uLnBuZw.webp)
WCC Pinecone Integration
No credit card required
Crawl any website and store its content in your Pinecone vector database. Enhance the accuracy and reliability of your own AI Assistant with facts fetched from external sources or connect this integration to our Pinecone GPT Chatbot assistant available in Apify Store.
Do you want to learn more about this Actor?
Get a demo. You can access the WCC Pinecone Integration programmatically from your own applications by using the Apify API. You can choose the language preference from below. To use the Apify API, you’ll need an Apify account and your API token, found in Integrations settings in Apify Console.
1{
2 "openapi": "3.0.1",
3 "info": {
4 "version": "0.0",
5 "x-build-id": "BB5ECWSEdgh4C1nF5"
6 },
7 "servers": [
8 {
9 "url": "https://api.apify.com/v2"
10 }
11 ],
12 "paths": {
13 "/acts/tri_angle~wcc-pinecone-integration/run-sync-get-dataset-items": {
14 "post": {
15 "operationId": "run-sync-get-dataset-items-tri_angle-wcc-pinecone-integration",
16 "x-openai-isConsequential": false,
17 "summary": "Executes an Actor, waits for its completion, and returns Actor's dataset items in response.",
18 "tags": [
19 "Run Actor"
20 ],
21 "requestBody": {
22 "required": true,
23 "content": {
24 "application/json": {
25 "schema": {
26 "$ref": "#/components/schemas/inputSchema"
27 }
28 }
29 }
30 },
31 "parameters": [
32 {
33 "name": "token",
34 "in": "query",
35 "required": true,
36 "schema": {
37 "type": "string"
38 },
39 "description": "Enter your Apify token here"
40 }
41 ],
42 "responses": {
43 "200": {
44 "description": "OK"
45 }
46 }
47 }
48 },
49 "/acts/tri_angle~wcc-pinecone-integration/runs": {
50 "post": {
51 "operationId": "runs-sync-tri_angle-wcc-pinecone-integration",
52 "x-openai-isConsequential": false,
53 "summary": "Executes an Actor and returns information about the initiated run in response.",
54 "tags": [
55 "Run Actor"
56 ],
57 "requestBody": {
58 "required": true,
59 "content": {
60 "application/json": {
61 "schema": {
62 "$ref": "#/components/schemas/inputSchema"
63 }
64 }
65 }
66 },
67 "parameters": [
68 {
69 "name": "token",
70 "in": "query",
71 "required": true,
72 "schema": {
73 "type": "string"
74 },
75 "description": "Enter your Apify token here"
76 }
77 ],
78 "responses": {
79 "200": {
80 "description": "OK",
81 "content": {
82 "application/json": {
83 "schema": {
84 "$ref": "#/components/schemas/runsResponseSchema"
85 }
86 }
87 }
88 }
89 }
90 }
91 },
92 "/acts/tri_angle~wcc-pinecone-integration/run-sync": {
93 "post": {
94 "operationId": "run-sync-tri_angle-wcc-pinecone-integration",
95 "x-openai-isConsequential": false,
96 "summary": "Executes an Actor, waits for completion, and returns the OUTPUT from Key-value store in response.",
97 "tags": [
98 "Run Actor"
99 ],
100 "requestBody": {
101 "required": true,
102 "content": {
103 "application/json": {
104 "schema": {
105 "$ref": "#/components/schemas/inputSchema"
106 }
107 }
108 }
109 },
110 "parameters": [
111 {
112 "name": "token",
113 "in": "query",
114 "required": true,
115 "schema": {
116 "type": "string"
117 },
118 "description": "Enter your Apify token here"
119 }
120 ],
121 "responses": {
122 "200": {
123 "description": "OK"
124 }
125 }
126 }
127 }
128 },
129 "components": {
130 "schemas": {
131 "inputSchema": {
132 "type": "object",
133 "required": [
134 "openaiApiKey",
135 "pineconeApiKey",
136 "pineconeIndexName"
137 ],
138 "properties": {
139 "url": {
140 "title": "Website URL",
141 "type": "string",
142 "description": "A URL of a website where to fetch the web pages from. The URL can be a top-level domain like https://example.com, a subdirectory https://example.com/some-directory/, or a specific page https://example.com/some-directory/page.html."
143 },
144 "query": {
145 "title": "Vector database query",
146 "type": "string",
147 "description": "Text query that will be used to search relevant documents in the vector database using similarity search. This query will be converted into an embedding vector using OpenAI embedding function and it will be compared to the vectors of documents stored in the vector database."
148 },
149 "noCrawling": {
150 "title": "No website crawling and vector DB update (query only)",
151 "type": "boolean",
152 "description": "If enabled, the crawler will not be started and the actor will only search the vector database for the given query.",
153 "default": false
154 },
155 "openaiApiKey": {
156 "title": "OpenAI API key",
157 "type": "string",
158 "description": "OpenAI API key to generate vector embeddings for documents that are stored to the vector database and also for the database query."
159 },
160 "pineconeApiKey": {
161 "title": "Pinecone API key",
162 "type": "string",
163 "description": "Your Pinecone API key."
164 },
165 "pineconeIndexName": {
166 "title": "Pinecone index name",
167 "pattern": "^[-a-z0-9]+$",
168 "type": "string",
169 "description": "The name of the Pinecone index where you want to store the vectors."
170 },
171 "topKResults": {
172 "title": "Top K results",
173 "minimum": 1,
174 "type": "integer",
175 "description": "The number of top results to return from the vector database. The results will be sorted by similarity to the query vector.",
176 "default": 10
177 },
178 "cacheKeyValueStoreName": {
179 "title": "Cache key-value store",
180 "type": "string",
181 "description": "The name of the key-value store where the actor will cache URLs of the fetched websites. If the website is already being crawled, the actor will be aborted.",
182 "default": "website-content-vector-cache"
183 },
184 "maxResults": {
185 "title": "Max results",
186 "minimum": 0,
187 "type": "integer",
188 "description": "The maximum number of resulting web pages to store. The crawler will automatically finish after reaching this number. This setting is useful to prevent accidental crawler runaway. If both **Max page** and **Max results** are defined, then the crawler will finish when the first limit is reached. Note that the crawler skips pages with the canonical URL of a page that has already been crawled, hence it might crawl more pages than there are results.",
189 "default": 9999999
190 },
191 "chunkSize": {
192 "title": "Chunk size",
193 "minimum": 1,
194 "type": "integer",
195 "description": "The maximum size of each chunk in characters.",
196 "default": 2000
197 },
198 "chunkOverlap": {
199 "title": "Chunk overlap",
200 "minimum": 0,
201 "type": "integer",
202 "description": "The number of overlapping characters between consecutive chunks.",
203 "default": 200
204 },
205 "crawlerType": {
206 "title": "Crawler type",
207 "enum": [
208 "playwright:firefox",
209 "playwright:chrome",
210 "playwright:adaptive",
211 "cheerio",
212 "jsdom"
213 ],
214 "type": "string",
215 "description": "Select the crawling engine:\n- **Headless web browser** - Useful for modern websites with anti-scraping protections and JavaScript rendering. It recognizes common blocking patterns like CAPTCHAs and automatically retries blocked requests through new sessions. However, running web browsers is more expensive as it requires more computing resources and is slower. It is recommended to use at least 8 GB of RAM.\n- **Stealthy web browser** (default) - Another headless web browser with anti-blocking measures enabled. Try this if you encounter bot protection while scraping. For best performance, use with Apify Proxy residential IPs. \n- **Adaptive switching between Chrome and raw HTTP client** - The crawler automatically switches between raw HTTP for static pages and Chrome browser (via Playwright) for dynamic pages, to get the maximum performance wherever possible. \n- **Raw HTTP client** - High-performance crawling mode that uses raw HTTP requests to fetch the pages. It is faster and cheaper, but it might not work on all websites.",
216 "default": "playwright:firefox"
217 },
218 "includeUrlGlobs": {
219 "title": "Include URLs (globs)",
220 "type": "array",
221 "description": "Glob patterns matching URLs of pages that will be included in crawling. \n\nSetting this option will disable the default Start URLs based scoping and will allow you to customize the crawling scope yourself. Note that this affects only links found on pages, but not **Start URLs** - if you want to crawl a page, make sure to specify its URL in the **Start URLs** field. \n\nFor example `https://{store,docs}.example.com/**` lets the crawler to access all URLs starting with `https://store.example.com/` or `https://docs.example.com/`, and `https://example.com/**/*\\?*foo=*` allows the crawler to access all URLs that contain `foo` query parameter with any value.\n\nLearn more about globs and test them [here](https://www.digitalocean.com/community/tools/glob?comments=true&glob=https%3A%2F%2Fexample.com%2Fscrape_this%2F%2A%2A&matches=false&tests=https%3A%2F%2Fexample.com%2Ftools%2F&tests=https%3A%2F%2Fexample.com%2Fscrape_this%2F&tests=https%3A%2F%2Fexample.com%2Fscrape_this%2F123%3Ftest%3Dabc&tests=https%3A%2F%2Fexample.com%2Fdont_scrape_this).",
222 "default": [],
223 "items": {
224 "type": "object",
225 "required": [
226 "glob"
227 ],
228 "properties": {
229 "glob": {
230 "type": "string",
231 "title": "Glob of a web page"
232 }
233 }
234 }
235 },
236 "excludeUrlGlobs": {
237 "title": "Exclude URLs (globs)",
238 "type": "array",
239 "description": "Glob patterns matching URLs of pages that will be excluded from crawling. Note that this affects only links found on pages, but not **Start URLs**, which are always crawled. \n\nFor example `https://{store,docs}.example.com/**` excludes all URLs starting with `https://store.example.com/` or `https://docs.example.com/`, and `https://example.com/**/*\\?*foo=*` excludes all URLs that contain `foo` query parameter with any value.\n\nLearn more about globs and test them [here](https://www.digitalocean.com/community/tools/glob?comments=true&glob=https%3A%2F%2Fexample.com%2Fdont_scrape_this%2F%2A%2A&matches=false&tests=https%3A%2F%2Fexample.com%2Ftools%2F&tests=https%3A%2F%2Fexample.com%2Fdont_scrape_this%2F&tests=https%3A%2F%2Fexample.com%2Fdont_scrape_this%2F123%3Ftest%3Dabc&tests=https%3A%2F%2Fexample.com%2Fscrape_this).",
240 "default": [],
241 "items": {
242 "type": "object",
243 "required": [
244 "glob"
245 ],
246 "properties": {
247 "glob": {
248 "type": "string",
249 "title": "Glob of a web page"
250 }
251 }
252 }
253 },
254 "ignoreCanonicalUrl": {
255 "title": "Ignore canonical URLs",
256 "type": "boolean",
257 "description": "If enabled, the Actor will ignore the canonical URL reported by the page, and use the actual URL instead. You can use this feature for websites that report invalid canonical URLs, which causes the Actor to skip those pages in results.",
258 "default": false
259 },
260 "maxCrawlDepth": {
261 "title": "Max crawling depth",
262 "minimum": 0,
263 "type": "integer",
264 "description": "The maximum number of links starting from the start URL that the crawler will recursively follow. The start URLs have depth `0`, the pages linked directly from the start URLs have depth `1`, and so on.\n\nThis setting is useful to prevent accidental crawler runaway. By setting it to `0`, the Actor will only crawl the Start URLs.",
265 "default": 20
266 },
267 "maxCrawlPages": {
268 "title": "Max pages",
269 "minimum": 0,
270 "type": "integer",
271        "description": "The maximum number of pages to crawl. It includes the start URLs, pagination pages, pages with no content, etc. The crawler will automatically finish after reaching this number. This setting is useful to prevent accidental crawler runaway.",
272 "default": 9999999
273 },
274 "initialConcurrency": {
275 "title": "Initial concurrency",
276 "minimum": 0,
277 "maximum": 999,
278 "type": "integer",
279 "description": "The initial number of web browsers or HTTP clients running in parallel. The system scales the concurrency up and down based on the current CPU and memory load. If the value is set to 0 (default), the Actor uses the default setting for the specific crawler type.\n\nNote that if you set this value too high, the Actor will run out of memory and crash. If too low, it will be slow at start before it scales the concurrency up.",
280 "default": 0
281 },
282 "maxConcurrency": {
283 "title": "Max concurrency",
284 "minimum": 1,
285 "maximum": 999,
286 "type": "integer",
287 "description": "The maximum number of web browsers or HTTP clients running in parallel. This setting is useful to avoid overloading the target websites and to avoid getting blocked.",
288 "default": 200
289 },
290 "initialCookies": {
291 "title": "Initial cookies",
292 "type": "array",
293 "description": "Cookies that will be pre-set to all pages the scraper opens. This is useful for pages that require login. The value is expected to be a JSON array of objects with `name` and `value` properties. For example: `[{\"name\": \"cookieName\", \"value\": \"cookieValue\"}]`.\n\nYou can use the [EditThisCookie](https://chrome.google.com/webstore/detail/editthiscookie/fngmhnnpilhplaeedifhccceomclgfbg) browser extension to copy browser cookies in this format, and paste it here.",
294 "default": []
295 },
296 "proxyConfiguration": {
297 "title": "Proxy configuration",
298 "type": "object",
299 "description": "Enables loading the websites from IP addresses in specific geographies and to circumvent blocking.",
300 "default": {
301 "useApifyProxy": true
302 }
303 },
304 "maxSessionRotations": {
305 "title": "Maximum number of session rotations",
306 "minimum": 0,
307 "maximum": 20,
308 "type": "integer",
309 "description": "The maximum number of times the crawler will rotate the session (IP address + browser configuration) on anti-scraping measures like CAPTCHAs. If the crawler rotates the session more than this number and the page is still blocked, it will finish with an error.",
310 "default": 10
311 },
312 "maxRequestRetries": {
313 "title": "Maximum number of retries on network / server errors",
314 "minimum": 0,
315 "maximum": 20,
316 "type": "integer",
317 "description": "The maximum number of times the crawler will retry the request on network, proxy or server errors. If the (n+1)-th request still fails, the crawler will mark this request as failed.",
318 "default": 5
319 },
320 "requestTimeoutSecs": {
321 "title": "Request timeout",
322 "minimum": 1,
323 "maximum": 600,
324 "type": "integer",
325 "description": "Timeout (in seconds) for making the request and processing its response. Defaults to 60s.",
326 "default": 60
327 },
328 "minFileDownloadSpeedKBps": {
329 "title": "Minimum file download speed (kilobytes per second)",
330 "type": "integer",
331        "description": "The minimum viable file download speed in kilobytes per second. If the file download speed is lower than this value for a prolonged duration, the crawler will consider the file download as failing, abort it, and retry it again (up to \"Maximum number of retries\" times). This is useful to avoid your crawls being stuck on slow file downloads.",
332 "default": 128
333 },
334 "dynamicContentWaitSecs": {
335 "title": "Wait for dynamic content (seconds)",
336 "type": "integer",
337        "description": "The maximum time to wait for dynamic page content to load. By default, it is 10 seconds. The crawler will continue either if this time elapses, or if it detects the network became idle as there are no more requests for additional resources.\n\nNote that this setting is ignored for the raw HTTP client, because it doesn't execute JavaScript or load any dynamic resources.",
338 "default": 10
339 },
340 "maxScrollHeightPixels": {
341 "title": "Maximum scroll height (pixels)",
342 "minimum": 0,
343 "type": "integer",
344        "description": "The crawler will scroll down the page until all content is loaded (and network becomes idle), or until this maximum scrolling height is reached. Setting this value to `0` disables scrolling altogether.\n\nNote that this setting is ignored for the raw HTTP client, because it doesn't execute JavaScript or load any dynamic resources.",
345 "default": 5000
346 },
347 "removeElementsCssSelector": {
348 "title": "Remove HTML elements (CSS selector)",
349 "type": "string",
350 "description": "A CSS selector matching HTML elements that will be removed from the DOM, before converting it to text, Markdown, or saving as HTML. This is useful to skip irrelevant page content. \n\nBy default, the Actor removes common navigation elements, headers, footers, modals, scripts, and inline image. You can disable the removal by setting this value to some non-existent CSS selector like `dummy_keep_everything`.",
351 "default": "nav, footer, script, style, noscript, svg,\n[role=\"alert\"],\n[role=\"banner\"],\n[role=\"dialog\"],\n[role=\"alertdialog\"],\n[role=\"region\"][aria-label*=\"skip\" i],\n[aria-modal=\"true\"]"
352 },
353 "removeCookieWarnings": {
354 "title": "Remove cookie warnings",
355 "type": "boolean",
356 "description": "If enabled, the Actor will try to remove cookies consent dialogs or modals, using the [I don't care about cookies](https://addons.mozilla.org/en-US/firefox/addon/i-dont-care-about-cookies/) browser extension, to improve the accuracy of the extracted text. Note that there is a small performance penalty if this feature is enabled.\n\nThis setting is ignored when using the raw HTTP crawler type.",
357 "default": true
358 },
359 "clickElementsCssSelector": {
360 "title": "Expand clickable elements",
361 "type": "string",
362 "description": "A CSS selector matching DOM elements that will be clicked. This is useful for expanding collapsed sections, in order to capture their text content.",
363 "default": "[aria-expanded=\"false\"]"
364 },
365 "htmlTransformer": {
366 "title": "HTML transformer",
367 "enum": [
368 "readableTextIfPossible",
369 "readableText",
370 "extractus",
371 "none"
372 ],
373 "type": "string",
374 "description": "Specify how to transform the HTML to extract meaningful content without any extra fluff, like navigation or modals. The HTML transformation happens after removing and clicking the DOM elements.\n\n- **Readable text with fallback** - Extracts the main contents of the webpage, without navigation and other fluff while carefully checking the content integrality.\n\n- **Readable text** (default) - Extracts the main contents of the webpage, without navigation and other fluff.\n- **Extractus** - Uses Extractus library.\n- **None** - Only removes the HTML elements specified via 'Remove HTML elements' option.\n\nYou can examine output of all transformers by enabling the debug mode.\n",
375 "default": "readableText"
376 },
377 "readableTextCharThreshold": {
378 "title": "Readable text extractor character threshold",
379 "type": "integer",
380        "description": "A configuration option for the \"Readable text\" HTML transformer. It contains the minimum number of characters an article must have in order to be considered relevant.",
381 "default": 100
382 },
383 "aggressivePrune": {
384 "title": "Remove duplicate text lines",
385 "type": "boolean",
386 "description": "This is an **experimental feature**. If enabled, the crawler will prune content lines that are very similar to the ones already crawled on other pages, using the Count-Min Sketch algorithm. This is useful to strip repeating content in the scraped data like menus, headers, footers, etc. In some (not very likely) cases, it might remove relevant content from some pages.",
387 "default": false
388 },
389 "debugMode": {
390 "title": "Debug mode (stores output of all HTML transformers)",
391 "type": "boolean",
392 "description": "If enabled, the Actor will store the output of all types of HTML transformers, including the ones that are not used by default, and it will also store the HTML to Key-value Store with a link. All this data is stored under the `debug` field in the resulting Dataset.",
393 "default": false
394 },
395 "debugLog": {
396 "title": "Debug log",
397 "type": "boolean",
398 "description": "If enabled, the actor log will include debug messages. Beware that this can be quite verbose.",
399 "default": false
400 }
401 }
402 },
403 "runsResponseSchema": {
404 "type": "object",
405 "properties": {
406 "data": {
407 "type": "object",
408 "properties": {
409 "id": {
410 "type": "string"
411 },
412 "actId": {
413 "type": "string"
414 },
415 "userId": {
416 "type": "string"
417 },
418 "startedAt": {
419 "type": "string",
420 "format": "date-time",
421 "example": "2025-01-08T00:00:00.000Z"
422 },
423 "finishedAt": {
424 "type": "string",
425 "format": "date-time",
426 "example": "2025-01-08T00:00:00.000Z"
427 },
428 "status": {
429 "type": "string",
430 "example": "READY"
431 },
432 "meta": {
433 "type": "object",
434 "properties": {
435 "origin": {
436 "type": "string",
437 "example": "API"
438 },
439 "userAgent": {
440 "type": "string"
441 }
442 }
443 },
444 "stats": {
445 "type": "object",
446 "properties": {
447 "inputBodyLen": {
448 "type": "integer",
449 "example": 2000
450 },
451 "rebootCount": {
452 "type": "integer",
453 "example": 0
454 },
455 "restartCount": {
456 "type": "integer",
457 "example": 0
458 },
459 "resurrectCount": {
460 "type": "integer",
461 "example": 0
462 },
463 "computeUnits": {
464 "type": "integer",
465 "example": 0
466 }
467 }
468 },
469 "options": {
470 "type": "object",
471 "properties": {
472 "build": {
473 "type": "string",
474 "example": "latest"
475 },
476 "timeoutSecs": {
477 "type": "integer",
478 "example": 300
479 },
480 "memoryMbytes": {
481 "type": "integer",
482 "example": 1024
483 },
484 "diskMbytes": {
485 "type": "integer",
486 "example": 2048
487 }
488 }
489 },
490 "buildId": {
491 "type": "string"
492 },
493 "defaultKeyValueStoreId": {
494 "type": "string"
495 },
496 "defaultDatasetId": {
497 "type": "string"
498 },
499 "defaultRequestQueueId": {
500 "type": "string"
501 },
502 "buildNumber": {
503 "type": "string",
504 "example": "1.0.0"
505 },
506 "containerUrl": {
507 "type": "string"
508 },
509 "usage": {
510 "type": "object",
511 "properties": {
512 "ACTOR_COMPUTE_UNITS": {
513 "type": "integer",
514 "example": 0
515 },
516 "DATASET_READS": {
517 "type": "integer",
518 "example": 0
519 },
520 "DATASET_WRITES": {
521 "type": "integer",
522 "example": 0
523 },
524 "KEY_VALUE_STORE_READS": {
525 "type": "integer",
526 "example": 0
527 },
528 "KEY_VALUE_STORE_WRITES": {
529 "type": "integer",
530 "example": 1
531 },
532 "KEY_VALUE_STORE_LISTS": {
533 "type": "integer",
534 "example": 0
535 },
536 "REQUEST_QUEUE_READS": {
537 "type": "integer",
538 "example": 0
539 },
540 "REQUEST_QUEUE_WRITES": {
541 "type": "integer",
542 "example": 0
543 },
544 "DATA_TRANSFER_INTERNAL_GBYTES": {
545 "type": "integer",
546 "example": 0
547 },
548 "DATA_TRANSFER_EXTERNAL_GBYTES": {
549 "type": "integer",
550 "example": 0
551 },
552 "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
553 "type": "integer",
554 "example": 0
555 },
556 "PROXY_SERPS": {
557 "type": "integer",
558 "example": 0
559 }
560 }
561 },
562 "usageTotalUsd": {
563 "type": "number",
564 "example": 0.00005
565 },
566 "usageUsd": {
567 "type": "object",
568 "properties": {
569 "ACTOR_COMPUTE_UNITS": {
570 "type": "integer",
571 "example": 0
572 },
573 "DATASET_READS": {
574 "type": "integer",
575 "example": 0
576 },
577 "DATASET_WRITES": {
578 "type": "integer",
579 "example": 0
580 },
581 "KEY_VALUE_STORE_READS": {
582 "type": "integer",
583 "example": 0
584 },
585 "KEY_VALUE_STORE_WRITES": {
586 "type": "number",
587 "example": 0.00005
588 },
589 "KEY_VALUE_STORE_LISTS": {
590 "type": "integer",
591 "example": 0
592 },
593 "REQUEST_QUEUE_READS": {
594 "type": "integer",
595 "example": 0
596 },
597 "REQUEST_QUEUE_WRITES": {
598 "type": "integer",
599 "example": 0
600 },
601 "DATA_TRANSFER_INTERNAL_GBYTES": {
602 "type": "integer",
603 "example": 0
604 },
605 "DATA_TRANSFER_EXTERNAL_GBYTES": {
606 "type": "integer",
607 "example": 0
608 },
609 "PROXY_RESIDENTIAL_TRANSFER_GBYTES": {
610 "type": "integer",
611 "example": 0
612 },
613 "PROXY_SERPS": {
614 "type": "integer",
615 "example": 0
616 }
617 }
618 }
619 }
620 }
621 }
622 }
623 }
624 }
625}
🌲 WCC Pinecone Integration OpenAPI definition
OpenAPI is a standard for designing and describing RESTful APIs, allowing developers to define API structure, endpoints, and data formats in a machine-readable way. It simplifies API development, integration, and documentation.
OpenAPI is effective when used with AI agents and GPTs by standardizing how these systems interact with various APIs, for reliable integrations and efficient communication.
By defining machine-readable API specifications, OpenAPI allows AI models like GPTs to understand and use varied data sources, improving accuracy. This accelerates development, reduces errors, and provides context-aware responses, making OpenAPI a core component for AI applications.
You can download the OpenAPI definitions for WCC Pinecone Integration from the options below:
If you’d like to learn more about how OpenAPI powers GPTs, read our blog post.
You can also check out our other API clients:
Actor Metrics
20 monthly users
-
4 stars
4.8% runs succeeded
2.4 days response time
Created in May 2024
Modified 3 months ago