Version: v3.3
Guardrails
Guardrails are configurable sets of rules and filters designed to enforce content safety, compliance, and quality standards within your data workflows. By defining guardrails, you can automatically detect and block sensitive information, filter out inappropriate language, prevent prompt attacks, and restrict certain topics or behaviors. Guardrails help ensure that your data and AI applications operate within organizational and regulatory boundaries.
Below is a sample resource definition file for a Guardrail resource:
{
"rGuardrail": {
"Type": "Guardrail",
"Properties": {
"GuardrailName": "MyGuardrail",
"Description": "Example guardrail for content filtering and safety",
"IsCrossRegionEnabled": true,
"Tier": "STANDARD",
"Scope": "global",
"BlockedMessage": "This content has been blocked by the guardrail",
"GroundingThreshold": 0.8,
"RelevanceThreshold": 0.7,
"EnableContextualGroundingCheck": true,
"SensitiveInformationFilters": {
"PiiTypesToFilter": [
{
"Type": "EMAIL",
"Action": "BLOCK"
},
{
"Type": "PHONE",
"Action": "ANONYMIZE"
}
],
"CustomRegexFilters": [
{
"RegexName": "CreditCardPattern",
"Pattern": "\\d{4}-\\d{4}-\\d{4}-\\d{4}",
"Action": "BLOCK"
}
]
},
"CustomWordFilters": {
"BlockWords": ["inappropriate", "spam"],
"EnableProfanityFilter": true
},
"FiltersConfig": [
{
"Type": "PROMPT_ATTACK",
"InputStrength": "HIGH",
"OutputStrength": "NONE"
}
],
"DeniedTopics": [
{
"TopicName": "Politics",
"TopicDefinition": "Content related to politics",
"ExamplePhrases": ["who should I vote for", "what is your opinion on the election"]
}
]
}
}
}