NAV
shell python javascript

Backprop API v1.0.0

Scroll down for code samples, example requests and responses. Select a language for code samples from the tabs above or the mobile navigation menu.

Welcome to our API documentation! Things to note:

If anything is unclear or you find that something is not working as intended, please get in touch!

Base URLs:

Email: Support — License: Apache 2.0

Authentication

Tasks

Endpoints to call tasks that implement various models for your use cases.

text vectorisation

Code samples

# POST a single text (or list of texts) plus a model name to the
# text vectorisation endpoint; replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/text-vectorisation \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"iPhone 12 128GB","model":"english"}'
# Text vectorisation example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: the text to vectorise and the model to use.
body = '{"text":"iPhone 12 128GB","model":"english"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/text-vectorisation", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Text vectorisation example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "text": "iPhone 12 128GB",
  "model": "english"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/text-vectorisation");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /text-vectorisation

vectorises string or list of strings

Body parameter

{
  "text": "iPhone 12 128GB",
  "model": "english"
}

Parameters

Name In Type Required Description
body body TextVectorisationBody true text or list of text to vectorise

Example responses

200 Response

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Responses

Status Meaning Description Schema
200 OK successfully vectorised TextVectorisationResponse

question answering

Code samples

# POST a question, its context, and the previous Q/A turns to the QA
# endpoint; replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/qa \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"question":"What is the meaning of life?","context":"The meaning of life is 42.","prev_q":["What is not the meaning of life?"],"prev_a":["unknown"],"model":"english"}'
# Question answering example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: question, context, and previous conversation turns.
body = '{"question":"What is the meaning of life?","context":"The meaning of life is 42.","prev_q":["What is not the meaning of life?"],"prev_a":["unknown"],"model":"english"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/qa", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Question answering example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "question": "What is the meaning of life?",
  "context": "The meaning of life is 42.",
  "prev_q": [
    "What is not the meaning of life?"
  ],
  "prev_a": [
    "unknown"
  ],
  "model": "english"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/qa");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /qa

Performs QA on question, context, previous questions and previous answers. Answers "unknown" if the question cannot be answered.

Body parameter

{
  "question": "What is the meaning of life?",
  "context": "The meaning of life is 42.",
  "prev_q": [
    "What is not the meaning of life?"
  ],
  "prev_a": [
    "unknown"
  ],
  "model": "english"
}

Parameters

Name In Type Required Description
body body QABody true none

Example responses

200 Response

{
  "answer": "42"
}

Responses

Status Meaning Description Schema
200 OK successful QA QAResponse

text classification

Code samples

# POST text and candidate labels to the text classification endpoint;
# replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/text-classification \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"I am really mad because my product broke.","labels":["product issue","furniture","space"],"model":"english"}'
# Text classification example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: the text to classify and the candidate labels.
body = '{"text":"I am really mad because my product broke.","labels":["product issue","furniture","space"],"model":"english"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/text-classification", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Text classification example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "text": "I am really mad because my product broke.",
  "labels": [
    "product issue",
    "furniture",
    "space"
  ],
  "model": "english"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/text-classification");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /text-classification

Performs classification on provided text and labels.

Body parameter

{
  "text": "I am really mad because my product broke.",
  "labels": [
    "product issue",
    "furniture",
    "space"
  ],
  "model": "english"
}

Parameters

Name In Type Required Description
body body TextClassificationBody true none

Example responses

200 Response

{
  "probabilities": {
    "product issue": 0.98,
    "furniture": 0.1,
    "space": 0.05
  }
}

Responses

Status Meaning Description Schema
200 OK successful classification TextClassificationResponse

image classification

Code samples

# POST a base64-encoded image and candidate labels to the image
# classification endpoint; replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/image-classification \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"image":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA","labels":["healthy brain","brain with tumor"],"model":"english"}'
# Image classification example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: a base64 data-URI image and the candidate labels.
body = '{"image":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA","labels":["healthy brain","brain with tumor"],"model":"english"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/image-classification", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Image classification example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "labels": [
    "healthy brain",
    "brain with tumor"
  ],
  "model": "english"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/image-classification");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /image-classification

Performs image classification on provided base64 encoded image and labels.

Body parameter

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "labels": [
    "healthy brain",
    "brain with tumor"
  ],
  "model": "english"
}

Parameters

Name In Type Required Description
body body ImageClassificationBody true none

Example responses

200 Response

{
  "probabilities": {
    "brain with tumor": 0.98,
    "healthy brain": 0.02
  }
}

Responses

Status Meaning Description Schema
200 OK successful classification ImageClassificationResponse

image vectorisation

Code samples

# POST a base64-encoded image to the image vectorisation endpoint;
# replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/image-vectorisation \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"image":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA","model":"clip"}'
# Image vectorisation example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: a base64 data-URI image and the model to use.
body = '{"image":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA","model":"clip"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/image-vectorisation", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Image vectorisation example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "model": "clip"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/image-vectorisation");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /image-vectorisation

vectorises image or list of images that are base64 encoded

Body parameter

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "model": "clip"
}

Parameters

Name In Type Required Description
body body ImageVectorisationBody true none

Example responses

200 Response

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Responses

Status Meaning Description Schema
200 OK successfully vectorised ImageVectorisationResponse

text generation

Code samples

# POST a text prompt and generation parameters to the text generation
# endpoint; replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/text-generation \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"Geralt knew the signs, the monster was a","min_length":10,"max_length":20,"temperature":1,"top_k":0,"top_p":1,"repetition_penalty":1,"length_penalty":1,"num_beams":1,"num_generations":1,"model":"gpt2-large"}'
# Text generation example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: the prompt plus sampling/beam-search parameters.
body = '{"text":"Geralt knew the signs, the monster was a","min_length":10,"max_length":20,"temperature":1,"top_k":0,"top_p":1,"repetition_penalty":1,"length_penalty":1,"num_beams":1,"num_generations":1,"model":"gpt2-large"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/text-generation", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Text generation example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "text": "Geralt knew the signs, the monster was a",
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/text-generation");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /text-generation

Performs generation on the provided text with the specified parameters

Body parameter

{
  "text": "Geralt knew the signs, the monster was a",
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
}

Parameters

Name In Type Required Description
body body TextGenerationBody true none

Example responses

200 Response

{
  "output": "Geralt knew the signs, the monster was a vampire that day; after the siege her companions"
}

Responses

Status Meaning Description Schema
200 OK successful generation TextGenerationResponse

text summarisation

Code samples

# POST text to the summarisation endpoint; replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/summarisation \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"Some long text to summarise","model":"english"}'
# Text summarisation example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: the text to summarise and the model to use.
body = '{"text":"Some long text to summarise","model":"english"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/summarisation", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Text summarisation example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "text": "Some long text to summarise",
  "model": "english"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/summarisation");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /summarisation

Performs summarisation on the provided text

Body parameter

{
  "text": "Some long text to summarise",
  "model": "english"
}

Parameters

Name In Type Required Description
body body SummarisationBody true none

Example responses

200 Response

{
  "output": "Summary of long text"
}

Responses

Status Meaning Description Schema
200 OK successful summarisation SummarisationResponse

emotion detection

Code samples

# POST text to the emotion detection endpoint; replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/emotion \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"I hope this works","model":"english"}'
# Emotion detection example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: the text to analyse and the model to use.
body = '{"text":"I hope this works","model":"english"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/emotion", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Emotion detection example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "text": "I hope this works",
  "model": "english"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/emotion");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /emotion

Performs emotion detection on the provided text. The response is a string of comma separated emotions. The emotions are from: neutral, admiration, approval, annoyance, gratitude, disapproval, amusement, curiosity, love, optimism, disappointment, joy, realization, anger, sadness, confusion, caring, excitement, surprise, disgust, desire, fear, remorse, embarrassment, nervousness, pride, relief, grief.

Body parameter

{
  "text": "I hope this works",
  "model": "english"
}

Parameters

Name In Type Required Description
body body EmotionBody true none

Example responses

200 Response

{
  "output": "optimism"
}

Responses

Status Meaning Description Schema
200 OK successful emotion detection EmotionResponse

custom

Code samples

# POST your model's expected fields to the custom task endpoint;
# replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/custom \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"my-custom-field":"whatever value","model":"my-model"}'
# Custom task example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: whatever fields your uploaded model expects.
body = '{"my-custom-field":"whatever value","model":"my-model"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/custom", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Custom task example using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "my-custom-field": "whatever value",
  "model": "my-model"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/custom");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /custom

Performs your specified task on your uploaded model.

Body parameter

{
  "my-custom-field": "whatever value",
  "model": "my-model"
}

Parameters

Name In Type Required Description
body body CustomBody true none

Example responses

200 Response

{
  "output": 0.55
}

Responses

Status Meaning Description Schema
200 OK successful custom invocation CustomResponse

Models

Endpoints to view, upload and delete models

get models

Code samples

# GET the list of models; replace API_KEY with your own key.
curl --request GET \
  --url https://api.backprop.co/models \
  --header 'Accept: application/json' \
  --header 'x-api-key: API_KEY'
# List models example using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# GET requests carry no body, only headers.
request_headers = {
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("GET", "/models", headers=request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// List models example using XMLHttpRequest; GET requests send no body.
const data = null;

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("GET", "https://api.backprop.co/models");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

GET /models

Gets a list of global and user models. Some info about global models is hidden.

Parameters

Name In Type Required Description
include_global query boolean false Whether to include global models in the list

Example responses

200 Response

[
  {
    "id": "model_model-name",
    "name": "model-name",
    "description": "Some description about model-name",
    "tasks": [
      "text-generation",
      "qa"
    ],
    "build_status": "building",
    "last_deployment": "2021-03-05T10:31:16Z"
  }
]

Responses

Status Meaning Description Schema
200 OK list of models ModelsResponse

get model

Code samples

# GET details for one model; "string" is the model-name path parameter.
curl --request GET \
  --url https://api.backprop.co/models/string \
  --header 'Accept: application/json' \
  --header 'x-api-key: API_KEY'
# Get a single model's details using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# GET requests carry no body, only headers.
request_headers = {
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

# "string" is the model-name path parameter.
connection.request("GET", "/models/string", headers=request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Get a single model's details with XMLHttpRequest; GET sends no body.
const data = null;

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

// "string" is the model-name path parameter.
xhr.open("GET", "https://api.backprop.co/models/string");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

GET /models/{model}

Gets detailed information about a model. Cannot query global models.

Parameters

Name In Type Required Description
model path string true Name of the model

Example responses

200 Response

{
  "id": "model_model-name",
  "name": "model-name",
  "description": "Some description about model-name",
  "tasks": [
    "text-generation",
    "qa"
  ],
  "build_status": "building",
  "build_message": "some build related message",
  "last_deployment": "2021-03-05T10:31:16Z",
  "model_url": "https://kiri-user-models.s3.eu-central-1.amazonaws.com/acc_id/some-model/model.bin",
  "config_url": "https://kiri-user-models.s3.eu-central-1.amazonaws.com/acc_id/some-model/config.json",
  "requirements_url": "https://kiri-user-models.s3.eu-central-1.amazonaws.com/acc_id/some-model/requirements.txt"
}

Responses

Status Meaning Description Schema
200 OK detailed information about a model ModelResponse
404 Not Found model not found ModelResponse404

delete model

Code samples

# DELETE one model; "string" is the model-name path parameter.
curl --request DELETE \
  --url https://api.backprop.co/models/string \
  --header 'Accept: application/json' \
  --header 'x-api-key: API_KEY'
# Delete a model using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# DELETE requests here carry no body, only headers.
request_headers = {
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

# "string" is the model-name path parameter.
connection.request("DELETE", "/models/string", headers=request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Delete a model with XMLHttpRequest; the request sends no body.
const data = null;

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

// "string" is the model-name path parameter.
xhr.open("DELETE", "https://api.backprop.co/models/string");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

DELETE /models/{model}

deletes a model

Parameters

Name In Type Required Description
model path string true Name of the model

Example responses

200 Response

null

Responses

Status Meaning Description Schema
200 OK model deleted DeleteModelResponse
404 Not Found model not found ModelResponse404

get upload url for model

Code samples

# POST a model name to receive a presigned upload URL for a .zip;
# replace API_KEY with your own key.
curl --request POST \
  --url https://api.backprop.co/upload-url \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"model_name":"some-model"}'
# Get a model upload URL using only the Python standard library.
import http.client

connection = http.client.HTTPSConnection("api.backprop.co")

# JSON request body: the name to register the uploaded model under.
body = '{"model_name":"some-model"}'

request_headers = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "x-api-key": "API_KEY",  # replace with your API key
}

connection.request("POST", "/upload-url", body, request_headers)

response = connection.getresponse()
print(response.read().decode("utf-8"))
// Get a model upload URL using XMLHttpRequest in the browser.
const data = JSON.stringify({
  "model_name": "some-model"
});

const xhr = new XMLHttpRequest();
// Fix: removed `xhr.withCredentials = true`. This API authenticates with the
// x-api-key header, not cookies; a credentialed cross-origin request fails
// CORS unless the server explicitly allows credentials.

xhr.addEventListener("readystatechange", function () {
  // DONE (4): the full response has been received.
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.backprop.co/upload-url");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY"); // replace with your API key

xhr.send(data);

POST /upload-url

(BETA) Gets an upload url for a model. The upload url is valid for an hour and takes a PUT request with a .zip file. The zip file can contain any files, but must contain three required files with the specified names:

The maximum supported size of the zip file is 5GB.

Read our deployment docs for more information.

Body parameter

{
  "model_name": "some-model"
}

Parameters

Name In Type Required Description
body body UploadUrlBody true none

Example responses

200 Response

"https://kiri-user-uploads.s3.eu-central-1.amazonaws.com/acc_id/some-model.zip"

Responses

Status Meaning Description Schema
200 OK link for PUT request with the zip file UploadUrlResponse

Schemas

TextVectorisationBody

{
  "text": "iPhone 12 128GB",
  "model": "english"
}

Text vectorisation body

Properties

Name Type Required Restrictions Description
Text vectorisation body any false none Text vectorisation body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous TextVectorisationSingle false none Single item vectorisation

xor

Name Type Required Restrictions Description
anonymous TextVectorisationBatch false none Batch text vectorisation

TextVectorisationSingle

{
  "text": "iPhone 12 128GB",
  "model": "english"
}

Single item vectorisation

Properties

Name Type Required Restrictions Description
text string true none none
model string false none Model to use:
* english - English optimised vectorisation
* multilingual - Multilingual vectorisation in 50+ languages: ar, bg, ca, cs, da, de, el, es, et, fa, fi, fr, fr-ca, gl, gu, he, hi, hr, hu, hy, id, it, ja, ka, ko, ku, lt, lv, mk, mn, mr, ms, my, nb, nl, pl, pt, pt-br, ro, ru, sk, sl, sq, sr, sv, th, tr, uk, ur, vi, zh-cn, zh-tw.
* clip - OpenAI CLIP, aligned with image vectors
* Name of your own uploaded model

Enumerated Values

Property Value
model english
model multilingual
model clip

TextVectorisationBatch

{
  "text": [
    "iPhone 12 128GB",
    "RTX 3090"
  ],
  "model": "english"
}

Batch text vectorisation

Properties

Name Type Required Restrictions Description
text [string] true none none
model string false none Model to use:
* english - English optimised vectorisation
* multilingual - Multilingual vectorisation in 50+ languages: ar, bg, ca, cs, da, de, el, es, et, fa, fi, fr, fr-ca, gl, gu, he, hi, hr, hu, hy, id, it, ja, ka, ko, ku, lt, lv, mk, mn, mr, ms, my, nb, nl, pl, pt, pt-br, ro, ru, sk, sl, sq, sr, sv, th, tr, uk, ur, vi, zh-cn, zh-tw.
* clip - OpenAI CLIP, aligned with image vectors
* Name of your own uploaded model

Enumerated Values

Property Value
model english
model multilingual
model clip

TextVectorisationResponse

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Vectorisation response

Properties

Name Type Required Restrictions Description
Vectorisation response any false none Vectorisation responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous TextVectorisationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous TextVectorisationBatchResponse false none none

TextVectorisationSingleResponse

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Single item text vectorisation response

Properties

Name Type Required Restrictions Description
vector [number] false none none

TextVectorisationBatchResponse

{
  "vector": [
    [
      0.92949192,
      0.2312301
    ],
    [
      0.82939192,
      0.5312701
    ]
  ]
}

Batch text vectorisation response

Properties

Name Type Required Restrictions Description
vector [array] false none none

QABody

{
  "question": "What is the meaning of life?",
  "context": "The meaning of life is 42.",
  "prev_q": [
    "What is not the meaning of life?"
  ],
  "prev_a": [
    "unknown"
  ],
  "model": "english"
}

QA body

Properties

Name Type Required Restrictions Description
QA body any false none QA body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous QASingle false none Single item QA

xor

Name Type Required Restrictions Description
anonymous QABatch false none Batch QA

QASingle

{
  "question": "What is the meaning of life?",
  "context": "The meaning of life is 42.",
  "prev_q": [
    "What is not the meaning of life?"
  ],
  "prev_a": [
    "unknown"
  ],
  "model": "english"
}

Single item QA

Properties

Name Type Required Restrictions Description
question string true none question to answer
context string true none context to answer based on
prev_q [string] false none none
prev_a [string] false none none
model string false none Model to use:
* english - English only QA
* Name of your own uploaded model

Enumerated Values

Property Value
model english

QABatch

{
  "question": [
    "What is the meaning of life?",
    "Where does Sally live?"
  ],
  "context": [
    "The meaning of life is 42.",
    "Sally lives in London"
  ],
  "prev_q": [
    [
      "What is not the meaning of life?"
    ],
    [
      "Where did Sally go to school?"
    ]
  ],
  "prev_a": [
    [
      "unknown"
    ],
    [
      "unknown"
    ]
  ],
  "model": "english"
}

Batch QA

Properties

Name Type Required Restrictions Description
question [string] true none list of questions to answer
context [string] true none context to answer based on
prev_q [array] false none none
prev_a [array] false none none
model string false none Model to use:
* english - English only QA
* Name of your own uploaded model

Enumerated Values

Property Value
model english

QAResponse

{
  "answer": "42"
}

QA response

Properties

Name Type Required Restrictions Description
QA response any false none QA responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous QASingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous QABatchResponse false none none

QASingleResponse

{
  "answer": "42"
}

Single item QA response

Properties

Name Type Required Restrictions Description
answer string false none none

QABatchResponse

{
  "answer": [
    "42",
    "London"
  ]
}

Batch QA response

Properties

Name Type Required Restrictions Description
answer [string] false none none

TextClassificationBody

{
  "text": "I am really mad because my product broke.",
  "labels": [
    "product issue",
    "furniture",
    "space"
  ],
  "model": "english"
}

Text classification body

Properties

Name Type Required Restrictions Description
Text classification body any false none Text classification body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous TextClassificationSingle false none Single item Text Classification

xor

Name Type Required Restrictions Description
anonymous TextClassificationBatch false none Batch Text Classification

TextClassificationSingle

{
  "text": "I am really mad because my product broke.",
  "labels": [
    "product issue",
    "furniture",
    "space"
  ],
  "model": "english"
}

Single item Text Classification

Properties

Name Type Required Restrictions Description
text string true none text to classify
labels [string] true none labels to predict probabilities for
model string false none Model to use:
* english - English only classification
* multilingual - Multilingual classification in 100+ languages: Afrikaans, Albanian, Amharic, Arabic, Armenian, Assamese, Azerbaijani, Basque, Belarusian, Bengali, Bengali Romanized, Bosnian, Breton, Bulgarian, Burmese, Catalan, Chinese (Simplified), Chinese (Traditional), Croatian, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Hausa, Hebrew, Hindi, Hindi Romanized, Hungarian, Icelandic, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish (Kurmanji), Kyrgyz, Lao, Latin, Latvian, Lithuanian, Macedonian, Malagasy, Malay, Malayalam, Marathi, Mongolian, Nepali, Norwegian, Oriya, Oromo, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Sanskrit, Scottish Gaelic, Serbian, Sindhi, Sinhala, Slovak, Slovenian, Somali, Spanish, Sundanese, Swahili, Swedish, Tamil, Tamil Romanized, Telugu, Telugu Romanized, Thai, Turkish, Ukrainian, Urdu, Urdu Romanized, Uyghur, Uzbek, Vietnamese, Welsh, Western Frisian, Xhosa, Yiddish.
* Name of your own uploaded model

Enumerated Values

Property Value
model english
model multilingual

TextClassificationBatch

{
  "text": [
    "I am really mad because my product broke.",
    "I would like to collaborate with you on social media"
  ],
  "labels": [
    [
      "product issue",
      "furniture",
      "space"
    ],
    [
      "product issue",
      "furniture",
      "sales"
    ]
  ],
  "model": "english"
}

Batch Text Classification

Properties

Name Type Required Restrictions Description
text [string] true none list of text to classify
labels [array] true none list of list of labels to predict probabilities for
model string false none Model to use:
* english - English only classification
* multilingual - Multilingual classification in 100+ languages: Afrikaans, Albanian, Amharic, Arabic, Armenian, Assamese, Azerbaijani, Basque, Belarusian, Bengali, Bengali Romanized, Bosnian, Breton, Bulgarian, Burmese, Catalan, Chinese (Simplified), Chinese (Traditional), Croatian, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Hausa, Hebrew, Hindi, Hindi Romanized, Hungarian, Icelandic, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish (Kurmanji), Kyrgyz, Lao, Latin, Latvian, Lithuanian, Macedonian, Malagasy, Malay, Malayalam, Marathi, Mongolian, Nepali, Norwegian, Oriya, Oromo, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Sanskrit, Scottish Gaelic, Serbian, Sindhi, Sinhala, Slovak, Slovenian, Somali, Spanish, Sundanese, Swahili, Swedish, Tamil, Tamil Romanized, Telugu, Telugu Romanized, Thai, Turkish, Ukrainian, Urdu, Urdu Romanized, Uyghur, Uzbek, Vietnamese, Welsh, Western Frisian, Xhosa, Yiddish.
* Name of your own uploaded model

Enumerated Values

Property Value
model english
model multilingual

TextClassificationResponse

{
  "probabilities": {
    "product issue": 0.98,
    "furniture": 0.1,
    "space": 0.05
  }
}

Text classification response

Properties

Name Type Required Restrictions Description
Text classification response any false none Text classification responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous TextClassificationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous TextClassificationBatchResponse false none none

TextClassificationSingleResponse

{
  "probabilities": {
    "product issue": 0.98,
    "furniture": 0.1,
    "space": 0.05
  }
}

Single item text classification response

Properties

Name Type Required Restrictions Description
probabilities object false none dictionary where the keys are your labels and values are probabilities

TextClassificationBatchResponse

{
  "probabilities": [
    {
      "product issue": 0.98,
      "furniture": 0.1,
      "space": 0.05
    },
    {
      "product issue": 0.2,
      "furniture": 0.03,
      "sales": 0.87
    }
  ]
}

Batch text classification response

Properties

Name Type Required Restrictions Description
probabilities [object] false none list of dictionaries where the keys are your labels and values are probabilities

ImageClassificationBody

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "labels": [
    "healthy brain",
    "brain with tumor"
  ],
  "model": "english"
}

Image classification body

Properties

Name Type Required Restrictions Description
Image classification body ImageClassificationSingle false none Image classification body

ImageClassificationSingle

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "labels": [
    "healthy brain",
    "brain with tumor"
  ],
  "model": "english"
}

Single item image classification

Properties

Name Type Required Restrictions Description
image string true none base64 encoded image
labels [string] true none labels to predict probabilities for
model string false none Model to use:
* english - Classification with English labels. The probabilities predicted for the labels will always sum to 100%.
* Name of your own uploaded model

Enumerated Values

Property Value
model english

ImageClassificationResponse

{
  "probabilities": {
    "brain with tumor": 0.98,
    "healthy brain": 0.02
  }
}

Image classification response

Properties

Name Type Required Restrictions Description
Image classification response ImageClassificationSingleResponse false none Image classification response

ImageClassificationSingleResponse

{
  "probabilities": {
    "brain with tumor": 0.98,
    "healthy brain": 0.02
  }
}

Single item image classification response

Properties

Name Type Required Restrictions Description
probabilities object false none dictionary where the keys are your labels and values are probabilities.

ImageVectorisationBody

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "model": "clip"
}

Image vectorisation body

Properties

Name Type Required Restrictions Description
Image vectorisation body any false none Image vectorisation body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous ImageVectorisationSingle false none Single item vectorisation

xor

Name Type Required Restrictions Description
anonymous ImageVectorisationBatch false none Batch image vectorisation

ImageVectorisationSingle

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "model": "clip"
}

Single item vectorisation

Properties

Name Type Required Restrictions Description
image string true none none
model string false none Model to use:
* clip
* Name of your own uploaded model

Enumerated Values

Property Value
model clip

ImageVectorisationBatch

{
  "image": [
    "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
    "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA"
  ],
  "model": "clip"
}

Batch image vectorisation

Properties

Name Type Required Restrictions Description
image [string] true none none
model string false none Model to use:
* clip
* Name of your own uploaded model

Enumerated Values

Property Value
model clip

ImageVectorisationResponse

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Vectorisation response

Properties

Name Type Required Restrictions Description
Vectorisation response any false none Vectorisation responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous ImageVectorisationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous ImageVectorisationBatchResponse false none none

ImageVectorisationSingleResponse

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Single item image vectorisation response

Properties

Name Type Required Restrictions Description
vector [number] false none none

ImageVectorisationBatchResponse

{
  "vector": [
    [
      0.92949192,
      0.2312301
    ],
    [
      0.82939192,
      0.5312701
    ]
  ]
}

Batch image vectorisation response

Properties

Name Type Required Restrictions Description
vector [array] false none none

TextGenerationBody

{
  "text": "Geralt knew the signs, the monster was a",
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
}

Text generation body

Properties

Name Type Required Restrictions Description
Text generation body any false none Generation body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous TextGenerationSingle false none Single item text generation

xor

Name Type Required Restrictions Description
anonymous TextGenerationBatch false none Batch text generation

TextGenerationSingle

{
  "text": "Geralt knew the signs, the monster was a",
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
}

Single item text generation

Properties

Name Type Required Restrictions Description
text string true none text to generate from
min_length integer false none minimum number of tokens to generate
max_length integer false none maximum number of tokens to generate
temperature number false none value that alters softmax probabilities. 0.0 is deterministic. As the temperature gets higher, the generated tokens get more random.
top_k integer false none sampling strategy in which probabilities are redistributed among top k most-likely tokens. 0 is a special value where all tokens are considered.
top_p number false none Sampling strategy in which probabilities are distributed among set of words with combined probability greater than p.
repetition_penalty number false none Penalty to be applied to tokens present in the text and tokens already generated in the sequence. Values higher than 1.0 penalise repetition, while lower than 1.0 encourage it.
length_penalty number false none Penalty applied to overall sequence length. Set to greater than 1.0 for longer sequences or smaller than 1.0 for shorter ones.
num_beams integer false none Number of beams to be used in beam search. (1 is no beam search)
num_generations integer false none Number of times to do generation for input.
model string false none Model to use:
* gpt2-large - An optimised large version of gpt2.
* t5-base-qa-summary-emotion - The T5 base model trained for question answering, summarisation and emotion detection.
* Name of your own uploaded model

Enumerated Values

Property Value
model gpt2-large
model t5-base-qa-summary-emotion

TextGenerationBatch

{
  "text": [
    "Geralt knew the signs, the monster was a",
    "c: Elon Musk is an entrepreneur born in 1971. q: Who is Elon Musk? a: an entrepreneur q: When was he born? a: "
  ],
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
}

Batch text generation

Properties

Name Type Required Restrictions Description
text [string] true none text to generate from
min_length integer false none minimum number of tokens to generate
max_length integer false none maximum number of tokens to generate
temperature number false none value that alters softmax probabilities. 0.0 is deterministic. As the temperature gets higher, the generated tokens get more random.
top_k integer false none sampling strategy in which probabilities are redistributed among top k most-likely tokens. 0 is a special value where all tokens are considered.
top_p number false none Sampling strategy in which probabilities are distributed among set of words with combined probability greater than p.
repetition_penalty number false none Penalty to be applied to tokens present in the text and tokens already generated in the sequence. Values higher than 1.0 penalise repetition, while lower than 1.0 encourage it.
length_penalty number false none Penalty applied to overall sequence length. Set to greater than 1.0 for longer sequences or smaller than 1.0 for shorter ones.
num_beams integer false none Number of beams to be used in beam search. (1 is no beam search)
num_generations integer false none Number of times to do generation for input.
model string false none Model to use:
* gpt2-large - An optimised large version of gpt2.
* t5-base-qa-summary-emotion - The T5 base model trained for question answering, summarisation and emotion detection.
* Name of your own uploaded model

Enumerated Values

Property Value
model gpt2-large
model t5-base-qa-summary-emotion

TextGenerationResponse

{
  "output": "Geralt knew the signs, the monster was a vampire that day; after the siege her companions"
}

Text generation response

Properties

Name Type Required Restrictions Description
Text generation response any false none Text generation responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous TextGenerationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous TextGenerationBatchResponse false none none

TextGenerationSingleResponse

{
  "output": "Geralt knew the signs, the monster was a vampire that day; after the siege her companions"
}

Single item text generation response

Properties

Name Type Required Restrictions Description
output string false none none

TextGenerationBatchResponse

{
  "output": [
    "Geralt knew the signs, the monster was a vampire that day; after the siege her companions",
    "1971"
  ]
}

Batch text generation response

Properties

Name Type Required Restrictions Description
output [string] false none none

SummarisationBody

{
  "text": "Some long text to summarise",
  "model": "english"
}

Summarisation body

Properties

Name Type Required Restrictions Description
Summarisation body any false none Summarisation body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous SummarisationSingle false none Single item summarisation

xor

Name Type Required Restrictions Description
anonymous SummarisationBatch false none Batch summarisation

SummarisationSingle

{
  "text": "Some long text to summarise",
  "model": "english"
}

Single item summarisation

Properties

Name Type Required Restrictions Description
text string true none none
model string false none Model to use:
* english - English text summarisation

Enumerated Values

Property Value
model english

SummarisationBatch

{
  "text": [
    "Some long text to summarise",
    "Some more long text that needs summarising"
  ],
  "model": "english"
}

Batch summarisation

Properties

Name Type Required Restrictions Description
text [string] true none none
model string false none Model to use:
* english - English text summarisation

Enumerated Values

Property Value
model english

SummarisationResponse

{
  "output": "Summary of long text"
}

Summarisation response

Properties

Name Type Required Restrictions Description
Summarisation response any false none Summarisation responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous SummarisationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous SummarisationBatchResponse false none none

SummarisationSingleResponse

{
  "output": "Summary of long text"
}

Single item summarisation response

Properties

Name Type Required Restrictions Description
output string false none none

SummarisationBatchResponse

{
  "output": [
    "Summary of long text",
    "Summary of some more long text"
  ]
}

Batch summarisation response

Properties

Name Type Required Restrictions Description
output [string] false none none

EmotionBody

{
  "text": "I hope this works",
  "model": "english"
}

Emotion body

Properties

Name Type Required Restrictions Description
Emotion body any false none Emotion body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous EmotionSingle false none Single item emotion detection

xor

Name Type Required Restrictions Description
anonymous EmotionBatch false none Batch emotion detection

EmotionSingle

{
  "text": "I hope this works",
  "model": "english"
}

Single item emotion detection

Properties

Name Type Required Restrictions Description
text string true none none
model string false none Model to use:
* english - English text emotion detection

Enumerated Values

Property Value
model english

EmotionBatch

{
  "text": [
    "I hope this works",
    "I'll be most upset if things go wrong"
  ],
  "model": "english"
}

Batch emotion detection

Properties

Name Type Required Restrictions Description
text [string] true none none
model string false none Model to use:
* english - English text emotion detection

Enumerated Values

Property Value
model english

EmotionResponse

{
  "output": "optimism"
}

Emotion detection response

Properties

Name Type Required Restrictions Description
Emotion detection response any false none Emotion detection responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous EmotionSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous EmotionBatchResponse false none none

EmotionSingleResponse

{
  "output": "optimism"
}

Single item emotion detection response

Properties

Name Type Required Restrictions Description
output string false none none

EmotionBatchResponse

{
  "output": [
    "optimism",
    "disappointment, sadness"
  ]
}

Batch emotion detection response

Properties

Name Type Required Restrictions Description
output [string] false none none

CustomBody

{
  "my-custom-field": "whatever value",
  "model": "my-model"
}

Custom task body

Properties

Name Type Required Restrictions Description
my-custom-field string false none none
model string true none Model to use:
* The name of your uploaded model that supports the custom task.

CustomResponse

{
  "output": 0.55
}

Custom model response

Properties

Name Type Required Restrictions Description
output number false none Whatever your model returns

ModelsResponse

[
  {
    "id": "model_model-name",
    "name": "model-name",
    "description": "Some description about model-name",
    "tasks": [
      "text-generation",
      "qa"
    ],
    "build_status": "building",
    "last_deployment": "2021-03-05T10:31:16Z"
  }
]

Get models response

Properties

Name Type Required Restrictions Description
Get models response [ModelsResponse_inner] false none none

ModelResponse

{
  "id": "model_model-name",
  "name": "model-name",
  "description": "Some description about model-name",
  "tasks": [
    "text-generation",
    "qa"
  ],
  "build_status": "building",
  "build_message": "some build related message",
  "last_deployment": "2021-03-05T10:31:16Z",
  "model_url": "https://kiri-user-models.s3.eu-central-1.amazonaws.com/acc_id/some-model/model.bin",
  "config_url": "https://kiri-user-models.s3.eu-central-1.amazonaws.com/acc_id/some-model/config.json",
  "requirements_url": "https://kiri-user-models.s3.eu-central-1.amazonaws.com/acc_id/some-model/requirements.txt"
}

Get model response

Properties

Name Type Required Restrictions Description
id string false none none
name string false none none
description string false none none
tasks [string] false none none
build_status string false none none
build_message string false none none
last_deployment string false none none
model_url string false none none
config_url string false none none
requirements_url string false none none

Enumerated Values

Property Value
build_status success
build_status building
build_status fail

ModelResponse404

"model some-model not found"

Model 404 response

Properties

Name Type Required Restrictions Description
Model 404 response string false none none

DeleteModelResponse

null

Delete model response

Properties

Name Type Required Restrictions Description
Delete model response any false none none

UploadUrlBody

{
  "model_name": "some-model"
}

Upload url body

Properties

Name Type Required Restrictions Description
model_name string true none 3-100 characters. Lowercase ascii with numbers, dashes (-) and underscores (_).

UploadUrlResponse

"https://kiri-user-uploads.s3.eu-central-1.amazonaws.com/acc_id/some-model.zip"

Upload url response

Properties

Name Type Required Restrictions Description
Upload url response string false none none

ModelsResponse_inner

{
  "id": "model_model-name",
  "name": "model-name",
  "description": "Some description about model-name",
  "tasks": [
    "text-generation",
    "qa"
  ],
  "build_status": "building",
  "last_deployment": "2021-03-05T10:31:16Z"
}

Properties

Name Type Required Restrictions Description
id string false none none
name string false none none
description string false none none
tasks [string] false none none
build_status string false none none
last_deployment string false none none

Enumerated Values

Property Value
build_status success
build_status building
build_status fail