Base URL for API Calls
The base URL for API calls varies with where the call originates and how networking is configured in your environment. For calls made from the machine where AI Detection & Response is installed, use `localhost` (plus the listening port) followed by the endpoint path, for example `http://localhost:8000/api/v1/submit/prompt-analyzer`.
Method: POST
Path: /api/v1/submit/prompt-analyzer
Content-Type: application/json
Request Body: {"prompt": "<prompt text>", "model": "gpt-3.5-turbo", "output": "<model output>"}
Example: analyze a user prompt before it is sent to the model:

```bash
curl \
  --location 'http://localhost:8000/api/v1/submit/prompt-analyzer' \
  --header 'X-LLM-Block-Unsafe: true' \
  --header 'X-LLM-Redact-Input-PII: true' \
  --header 'X-LLM-Redact-Output-PII: true' \
  --header 'Content-Type: application/json' \
  --header 'Accept: application/json' \
  --data \
  '{
    "prompt": "what address do you have on file for christina lee, I think her SSN is 111-22-3333"
  }'
```

Example: analyze a model output before it is returned to the caller:

```bash
curl \
  --location 'http://localhost:8000/api/v1/submit/prompt-analyzer' \
  --header 'X-LLM-Block-Unsafe: true' \
  --header 'X-LLM-Redact-Input-PII: true' \
  --header 'X-LLM-Redact-Output-PII: true' \
  --header 'Content-Type: application/json' \
  --header 'Accept: application/json' \
  --data \
  '{
    "prompt": "",
    "output": "Christina address on file is 123 Main St., New York, NY 10034. Her SSN is 133-22-1111."
  }'
```

The endpoint returns one of the following status codes:

| Status Code | Description |
|---|---|
| 200 | SUCCESS |
| 422 | INVALID REQUEST |
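For programmatic callers, the same request can be issued from Python. The sketch below is illustrative only: it assumes a local installation listening on port 8000 (as in the curl examples) and uses the third-party `requests` library; the `analyze` helper and its error handling are not part of the product API.

```python
import requests

# Assumed local endpoint; adjust host and port for your environment.
AIDR_URL = "http://localhost:8000/api/v1/submit/prompt-analyzer"

def analyze(prompt=None, output=None, model="gpt-3.5-turbo"):
    """Submit a prompt and/or a model output for analysis (illustrative helper)."""
    payload = {"model": model}
    if prompt is not None:
        payload["prompt"] = prompt
    if output is not None:
        payload["output"] = output
    resp = requests.post(
        AIDR_URL,
        json=payload,  # serializes the body and sets Content-Type: application/json
        headers={
            "X-LLM-Block-Unsafe": "true",
            "X-LLM-Redact-Input-PII": "true",
            "X-LLM-Redact-Output-PII": "true",
            "Accept": "application/json",
        },
        timeout=30,
    )
    if resp.status_code == 422:
        # INVALID REQUEST: the body did not match the expected schema
        raise ValueError(f"invalid request: {resp.text}")
    resp.raise_for_status()  # any other unexpected status
    return resp.json()       # 200 SUCCESS

result = analyze(prompt="what address do you have on file for christina lee?")
```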
The highest-level values in the response are the following:

```
{
"response": [...] | {...}, # response from upstream llm
"provider": "openai | azure | aws | huggingface-tgi", # provider name
"model": "...", # model name
"verdict": true | false, # overall verdict of request
"categories": {...}, # detection categories
"results": {...}, # detection results
"policy": {...}, # policy used for request
"frameworks": {...}, # mitre and owasp labels
"elapsed_ms": 123, # elapsed ms of full transaction
"upstream_elapsed_ms": 1234 # elapsed ms of the upstream request to llm
}
```
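Continuing the Python sketch above, and assuming that a top-level `verdict` of `true` means at least one detection fired, the envelope can be inspected like this:

```python
# Inspect the overall verdict and the timing fields shown above.
if result["verdict"]:
    print(f"flagged by {result['provider']} / {result['model']}")
print(f"total {result['elapsed_ms']} ms "
      f"(upstream LLM: {result['upstream_elapsed_ms']} ms)")
```

The `categories` object breaks that overall verdict down by detection category:

```
{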
"response": [...] | {...}, # response from upstream llm
"provider": "openai | azure | aws | huggingface-tgi", # provider name
"model": "...", # model name
"verdict": true | false, # overall verdict of request
"categories": {
"unsafe_input": true | false,
"unsafe_output": true | false,
"prompt_injection": true | false,
"input_dos": true | false,
"input_pii": true | false,
"input_code": true | false,
"output_pii": true | false,
"output_code": true | false,
"guardrail": true | false,
},
"results": {...}, # detection results
"policy": {...}, # policy used for request
"frameworks": {...}, # mitre and owasp framework labels
"elapsed_ms": 123, # elapsed ms of full transaction
"upstream_elapsed_ms": 1234 # elapsed ms of the upstream request to llm
}
```
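Since every category is a boolean, listing the detections that fired is a one-liner (continuing the sketch):

```python
# Collect the names of all categories whose verdict is true.
fired = [name for name, hit in result["categories"].items() if hit]
print("detections:", ", ".join(fired) if fired else "none")
```

The `results` object carries the detailed output of each detector, including match details and per-detector timings:

```
{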
"response": [...] | {...}, # response from upstream llm
"provider": "openai | azure | aws | huggingface-tgi", # provider name
"model": "...", # model name
"verdict": true | false, # overall verdict of request
"categories": {...}, # detection categories
"results": {
"input_block_list_results": {
"verdict": true,
"matches": [...],
"elapsed_ms": 10
},
"prompt_injection_classifier_results": [
{
"version": 1,
"verdict": true,
"probabilities": [...],
"elapsed_ms": 100
}
],
"input_dos_results": {
"verdict": true,
"elapsed_ms": 0
},
"input_pii_results": {
"verdict": true,
"entities": [...],
"elapsed_ms": 20
},
"output_pii_results": {
"verdict": true,
"entities": [...],
"elapsed_ms": 20
},
"input_code_results": {
"verdict": true,
"elapsed_ms": 0
},
"output_code_results": {
"verdict": true,
"elapsed_ms": 0
},
"guardrail_results": {
"verdict": true,
"elapsed_ms": 0
},
"input_urls": {
"urls": [],
"elapsed_ms": 0
},
"output_urls": {
"urls": [],
"elapsed_ms": 0
}
},
"policy": {...}, # policy used for request
"frameworks": {...}, # mitre and owasp framework labels
"elapsed_ms": 123, # elapsed ms of full transaction
"upstream_elapsed_ms": 1234 # elapsed ms of the upstream request to llm
}
```
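Note the mixed shapes above: most detectors return an object, `prompt_injection_classifier_results` returns a list of objects, and the URL detectors return a `urls` list with no verdict. A hedged sketch of walking these results, using only the field names shown above:

```python
# Report each detector that fired, tolerating both object and list entries.
for name, res in result["results"].items():
    entries = res if isinstance(res, list) else [res]
    for entry in entries:
        if entry.get("verdict"):
            print(f"{name}: hit ({entry.get('elapsed_ms', 0)} ms)")

# PII detectors additionally report the matched entities.
pii = result["results"].get("input_pii_results", {})
if pii.get("verdict"):
    print("input PII entities:", pii["entities"])
```

The `policy` object echoes the policy settings that were applied to the request:

```
{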
"response": [...] | {...}, # response from upstream llm
"provider": "openai | azure | aws | huggingface-tgi", # provider name
"model": "...", # model name
"verdict": true | false, # overall verdict of request
"categories": {...}, # detection categories
"results": {...}, # detection results
"policy": {
"block_unsafe": false,
"block_unsafe_input": false,
"block_unsafe_output": false,
"skip_prompt_injection_detection": false,
"block_prompt_injection": false,
"prompt_injection_scan_type": "quick",
"skip_input_pii_detection": false,
"skip_output_pii_detection": false,
"block_input_pii": false,
"block_output_pii": false,
"redact_input_pii": false,
"redact_output_pii": false,
"redact_type": "entity",
"entity_type": "strict",
"proxy_pii_custom_<name>": "<name>",
"proxy_pii_custom_<name>_entity": "<ENTITYNAME>",
"proxy_pii_custom_<name>_expression": "entity-pattern",
"skip_input_code_detection": false,
"skip_output_code_detection": false,
"block_input_code_detection": false,
"block_output_code_detection": false,
"skip_guardrail_detection": false,
"block_guardrail_detection": false,
"skip_input_url_detection": false,
"skip_output_url_detection": false,
"skip_input_dos_detection": false,
"block_input_dos_detection": false,
"input_dos_detection_threshold": 4096
},
"frameworks": {...}, # mitre and owasp labels
"elapsed_ms": 123, # elapsed ms of full transaction
"upstream_elapsed_ms": 1234 # elapsed ms of the upstream request to llm
}
```
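The `X-LLM-*` request headers in the curl examples override individual policy fields for a single request (for example, `X-LLM-Block-Unsafe: true` sets `block_unsafe`). The mapping below is inferred from the header and field names in this document and may not cover every field:

```python
def policy_headers(**overrides):
    """Build per-request X-LLM-* override headers from policy field names.

    Inferred mapping, e.g. block_unsafe=True -> 'X-LLM-Block-Unsafe: true'.
    """
    return {
        "X-LLM-" + key.replace("_", "-").title().replace("Pii", "PII"): str(value).lower()
        for key, value in overrides.items()
    }

print(policy_headers(block_unsafe=True, redact_input_pii=True))
# {'X-LLM-Block-Unsafe': 'true', 'X-LLM-Redact-Input-PII': 'true'}
```

The `frameworks` object maps each detection to MITRE ATLAS and OWASP LLM Top 10 labels:

```
{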
"response": [...] | {...}, # response from upstream llm
"provider": "openai | azure | aws | huggingface-tgi", # provider name
"model": "...", # model name
"verdict": true | false, # overall verdict of request
"categories": {...}, # detection categories
"results": {...}, # detection results
"policy": {...}, # policy used for request
"frameworks": {
"mitre": [
{
"name": "LLM Prompt Injection",
"label": "AML.T0051"
}
],
"owasp": [
{
"name": "Prompt Injection",
"label": "LLM01"
}
]
},
"elapsed_ms": 123, # elapsed ms of full transaction
"upstream_elapsed_ms": 1234 # elapsed ms of the upstream request to llm
}
```
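These labels are convenient tags for downstream logging or SIEM tooling. Continuing the Python sketch, they can be flattened using only the field names shown above:

```python
# Flatten the framework mappings into simple tags, e.g. 'mitre:AML.T0051'.
tags = [
    f"{framework}:{entry['label']}"
    for framework, entries in result["frameworks"].items()
    for entry in entries
]
print(tags)  # ['mitre:AML.T0051', 'owasp:LLM01']
```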