{
  "description": "Converts audio to text by applying powerful neural network models.",
  "servicePath": "",
  "ownerDomain": "google.com",
  "canonicalName": "Speech",
  "baseUrl": "https://speech.googleapis.com/",
  "title": "Cloud Speech-to-Text API",
  "endpoints": [
    {
      "location": "us-central1",
      "description": "Regional Endpoint",
      "endpointUrl": "https://speech.us-central1.rep.googleapis.com/"
    },
    {
      "endpointUrl": "https://speech.us-west1.rep.googleapis.com/",
      "description": "Regional Endpoint",
      "location": "us-west1"
    },
    {
      "location": "me-west1",
      "description": "Regional Endpoint",
      "endpointUrl": "https://speech.me-west1.rep.googleapis.com/"
    },
    {
      "description": "Regional Endpoint",
      "endpointUrl": "https://speech.europe-west1.rep.googleapis.com/",
      "location": "europe-west1"
    },
    {
      "location": "europe-west2",
      "description": "Regional Endpoint",
      "endpointUrl": "https://speech.europe-west2.rep.googleapis.com/"
    },
    {
      "location": "europe-west3",
      "description": "Regional Endpoint",
      "endpointUrl": "https://speech.europe-west3.rep.googleapis.com/"
    },
    {
      "location": "europe-west4",
      "endpointUrl": "https://speech.europe-west4.rep.googleapis.com/",
      "description": "Regional Endpoint"
    }
  ],
  "resources": {
    "projects": {
      "resources": {
        "locations": {
          "methods": {
            "list": {
              "path": "v2/{+name}/locations",
              "flatPath": "v2/projects/{projectsId}/locations",
              "httpMethod": "GET",
              "description": "Lists information about the supported locations for this service. This method lists locations based on the resource scope provided in the [ListLocationsRequest.name] field: * **Global locations**: If `name` is empty, the method lists the public locations available to all projects. * **Project-specific locations**: If `name` follows the format `projects/{project}`, the method lists locations visible to that specific project. This includes public, private, or other project-specific locations enabled for the project. For gRPC and client library implementations, the resource name is passed as the `name` field. For direct service calls, the resource name is incorporated into the request path based on the specific service implementation and version.",
              "parameters": {
                "pageToken": {
                  "location": "query",
                  "type": "string",
                  "description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page."
                },
                "pageSize": {
                  "location": "query",
                  "description": "The maximum number of results to return. If not set, the service selects a default.",
                  "type": "integer",
                  "format": "int32"
                },
                "name": {
                  "required": true,
                  "type": "string",
                  "description": "The resource that owns the locations collection, if applicable.",
                  "location": "path",
                  "pattern": "^projects/[^/]+$"
                },
                "filter": {
                  "type": "string",
                  "description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like `\"displayName=tokyo\"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).",
                  "location": "query"
                },
                "extraLocationTypes": {
                  "repeated": true,
                  "type": "string",
                  "description": "Optional. Do not use this field. It is unsupported and is ignored unless explicitly documented otherwise. This is primarily for internal usage.",
                  "location": "query"
                }
              },
              "parameterOrder": [
                "name"
              ],
              "response": {
                "$ref": "ListLocationsResponse"
              },
              "scopes": [
                "https://www.googleapis.com/auth/cloud-platform"
              ],
              "id": "speech.projects.locations.list"
            },
            "get": {
              "path": "v2/{+name}",
              "flatPath": "v2/projects/{projectsId}/locations/{locationsId}",
              "httpMethod": "GET",
              "description": "Gets information about a location.",
              "parameters": {
                "name": {
                  "required": true,
                  "type": "string",
                  "description": "Resource name for the location.",
                  "location": "path",
                  "pattern": "^projects/[^/]+/locations/[^/]+$"
                }
              },
              "parameterOrder": [
                "name"
              ],
              "response": {
                "$ref": "Location"
              },
              "scopes": [
                "https://www.googleapis.com/auth/cloud-platform"
              ],
              "id": "speech.projects.locations.get"
            }
          },
          "resources": {
            "recognizers": {
              "methods": {
                "create": {
                  "request": {
                    "$ref": "Recognizer"
                  },
                  "path": "v2/{+parent}/recognizers",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recognizers",
                  "httpMethod": "POST",
                  "description": "Creates a Recognizer.",
                  "parameters": {
                    "validateOnly": {
                      "location": "query",
                      "type": "boolean",
                      "description": "If set, validate the request and preview the Recognizer, but do not actually create it."
                    },
                    "recognizerId": {
                      "location": "query",
                      "type": "string",
                      "description": "The ID to use for the Recognizer, which will become the final component of the Recognizer's resource name. This value should be 4-63 characters, and valid characters are /a-z-/."
                    },
                    "parent": {
                      "description": "Required. The project and location where this Recognizer will be created. The expected format is `projects/{project}/locations/{location}`.",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+$",
                      "location": "path",
                      "required": true
                    }
                  },
                  "parameterOrder": [
                    "parent"
                  ],
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "id": "speech.projects.locations.recognizers.create"
                },
                "delete": {
                  "id": "speech.projects.locations.recognizers.delete",
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "name": {
                      "pattern": "^projects/[^/]+/locations/[^/]+/recognizers/[^/]+$",
                      "location": "path",
                      "description": "Required. The name of the Recognizer to delete. Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`",
                      "type": "string",
                      "required": true
                    },
                    "validateOnly": {
                      "type": "boolean",
                      "description": "If set, validate the request and preview the deleted Recognizer, but do not actually delete it.",
                      "location": "query"
                    },
                    "allowMissing": {
                      "location": "query",
                      "type": "boolean",
                      "description": "If set to true, and the Recognizer is not found, the request will succeed and be a no-op (no Operation is recorded in this case)."
                    },
                    "etag": {
                      "location": "query",
                      "type": "string",
                      "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding."
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "description": "Deletes the Recognizer.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recognizers/{recognizersId}",
                  "httpMethod": "DELETE",
                  "path": "v2/{+name}"
                },
                "undelete": {
                  "id": "speech.projects.locations.recognizers.undelete",
                  "parameters": {
                    "name": {
                      "location": "path",
                      "pattern": "^projects/[^/]+/locations/[^/]+/recognizers/[^/]+$",
                      "type": "string",
                      "description": "Required. The name of the Recognizer to undelete. Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`",
                      "required": true
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recognizers/{recognizersId}:undelete",
                  "httpMethod": "POST",
                  "description": "Undeletes the Recognizer.",
                  "request": {
                    "$ref": "UndeleteRecognizerRequest"
                  },
                  "path": "v2/{+name}:undelete"
                },
                "list": {
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recognizers",
                  "httpMethod": "GET",
                  "description": "Lists Recognizers.",
                  "path": "v2/{+parent}/recognizers",
                  "id": "speech.projects.locations.recognizers.list",
                  "parameters": {
                    "parent": {
                      "type": "string",
                      "description": "Required. The project and location of Recognizers to list. The expected format is `projects/{project}/locations/{location}`.",
                      "location": "path",
                      "pattern": "^projects/[^/]+/locations/[^/]+$",
                      "required": true
                    },
                    "pageToken": {
                      "type": "string",
                      "description": "A page token, received from a previous ListRecognizers call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ListRecognizers must match the call that provided the page token.",
                      "location": "query"
                    },
                    "pageSize": {
                      "location": "query",
                      "type": "integer",
                      "format": "int32",
                      "description": "The maximum number of Recognizers to return. The service may return fewer than this value. If unspecified, at most 5 Recognizers will be returned. The maximum value is 100; values above 100 will be coerced to 100."
                    },
                    "showDeleted": {
                      "location": "query",
                      "type": "boolean",
                      "description": "Whether or not to show resources that have been deleted."
                    }
                  },
                  "parameterOrder": [
                    "parent"
                  ],
                  "response": {
                    "$ref": "ListRecognizersResponse"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ]
                },
                "recognize": {
                  "id": "speech.projects.locations.recognizers.recognize",
                  "response": {
                    "$ref": "RecognizeResponse"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "recognizer": {
                      "required": true,
                      "description": "Required. The name of the Recognizer to use during recognition. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`. The {recognizer} segment may be set to `_` to use an empty implicit Recognizer.",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+/recognizers/[^/]+$",
                      "location": "path"
                    }
                  },
                  "parameterOrder": [
                    "recognizer"
                  ],
                  "description": "Performs synchronous Speech recognition: receive results after all audio has been sent and processed.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recognizers/{recognizersId}:recognize",
                  "httpMethod": "POST",
                  "path": "v2/{+recognizer}:recognize",
                  "request": {
                    "$ref": "RecognizeRequest"
                  }
                },
                "get": {
                  "id": "speech.projects.locations.recognizers.get",
                  "response": {
                    "$ref": "Recognizer"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "name": {
                      "required": true,
                      "type": "string",
                      "description": "Required. The name of the Recognizer to retrieve. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`.",
                      "location": "path",
                      "pattern": "^projects/[^/]+/locations/[^/]+/recognizers/[^/]+$"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "description": "Returns the requested Recognizer. Fails with NOT_FOUND if the requested Recognizer doesn't exist.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recognizers/{recognizersId}",
                  "httpMethod": "GET",
                  "path": "v2/{+name}"
                },
                "batchRecognize": {
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "recognizer": {
                      "type": "string",
                      "description": "Required. The name of the Recognizer to use during recognition. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`. The {recognizer} segment may be set to `_` to use an empty implicit Recognizer.",
                      "location": "path",
                      "pattern": "^projects/[^/]+/locations/[^/]+/recognizers/[^/]+$",
                      "required": true
                    }
                  },
                  "parameterOrder": [
                    "recognizer"
                  ],
                  "id": "speech.projects.locations.recognizers.batchRecognize",
                  "path": "v2/{+recognizer}:batchRecognize",
                  "request": {
                    "$ref": "BatchRecognizeRequest"
                  },
                  "description": "Performs batch asynchronous speech recognition: send a request with N audio files and receive a long running operation that can be polled to see when the transcriptions are finished.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recognizers/{recognizersId}:batchRecognize",
                  "httpMethod": "POST"
                },
                "patch": {
                  "id": "speech.projects.locations.recognizers.patch",
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "updateMask": {
                      "type": "string",
                      "format": "google-fieldmask",
                      "description": "The list of fields to update. If empty, all non-default valued fields are considered for update. Use `*` to update the entire Recognizer resource.",
                      "location": "query"
                    },
                    "name": {
                      "description": "Output only. Identifier. The resource name of the Recognizer. Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`.",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+/recognizers/[^/]+$",
                      "location": "path",
                      "required": true
                    },
                    "validateOnly": {
                      "location": "query",
                      "description": "If set, validate the request and preview the updated Recognizer, but do not actually update it.",
                      "type": "boolean"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "description": "Updates the Recognizer.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/recognizers/{recognizersId}",
                  "httpMethod": "PATCH",
                  "path": "v2/{+name}",
                  "request": {
                    "$ref": "Recognizer"
                  }
                }
              }
            },
            "customClasses": {
              "methods": {
                "get": {
                  "description": "Returns the requested CustomClass.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/customClasses/{customClassesId}",
                  "httpMethod": "GET",
                  "path": "v2/{+name}",
                  "id": "speech.projects.locations.customClasses.get",
                  "response": {
                    "$ref": "CustomClass"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "name": {
                      "pattern": "^projects/[^/]+/locations/[^/]+/customClasses/[^/]+$",
                      "location": "path",
                      "description": "Required. The name of the CustomClass to retrieve. The expected format is `projects/{project}/locations/{location}/customClasses/{custom_class}`.",
                      "type": "string",
                      "required": true
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ]
                },
                "patch": {
                  "request": {
                    "$ref": "CustomClass"
                  },
                  "path": "v2/{+name}",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/customClasses/{customClassesId}",
                  "httpMethod": "PATCH",
                  "description": "Updates the CustomClass.",
                  "parameters": {
                    "updateMask": {
                      "location": "query",
                      "type": "string",
                      "format": "google-fieldmask",
                      "description": "The list of fields to be updated. If empty, all fields are considered for update."
                    },
                    "name": {
                      "required": true,
                      "pattern": "^projects/[^/]+/locations/[^/]+/customClasses/[^/]+$",
                      "location": "path",
                      "description": "Output only. Identifier. The resource name of the CustomClass. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}`.",
                      "type": "string"
                    },
                    "validateOnly": {
                      "location": "query",
                      "type": "boolean",
                      "description": "If set, validate the request and preview the updated CustomClass, but do not actually update it."
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "id": "speech.projects.locations.customClasses.patch"
                },
                "undelete": {
                  "request": {
                    "$ref": "UndeleteCustomClassRequest"
                  },
                  "path": "v2/{+name}:undelete",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/customClasses/{customClassesId}:undelete",
                  "httpMethod": "POST",
                  "description": "Undeletes the CustomClass.",
                  "parameters": {
                    "name": {
                      "required": true,
                      "type": "string",
                      "description": "Required. The name of the CustomClass to undelete. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}`",
                      "location": "path",
                      "pattern": "^projects/[^/]+/locations/[^/]+/customClasses/[^/]+$"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "id": "speech.projects.locations.customClasses.undelete"
                },
                "list": {
                  "parameters": {
                    "parent": {
                      "type": "string",
                      "description": "Required. The project and location of CustomClass resources to list. The expected format is `projects/{project}/locations/{location}`.",
                      "location": "path",
                      "pattern": "^projects/[^/]+/locations/[^/]+$",
                      "required": true
                    },
                    "pageToken": {
                      "location": "query",
                      "type": "string",
                      "description": "A page token, received from a previous ListCustomClasses call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ListCustomClasses must match the call that provided the page token."
                    },
                    "pageSize": {
                      "location": "query",
                      "description": "Number of results per request. A valid page_size ranges from 0 to 100 inclusive. If the page_size is zero or unspecified, a page size of 5 will be chosen. If the page size exceeds 100, it will be coerced down to 100. Note that a call might return fewer results than the requested page size.",
                      "type": "integer",
                      "format": "int32"
                    },
                    "showDeleted": {
                      "location": "query",
                      "description": "Whether or not to show resources that have been deleted.",
                      "type": "boolean"
                    }
                  },
                  "parameterOrder": [
                    "parent"
                  ],
                  "response": {
                    "$ref": "ListCustomClassesResponse"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "id": "speech.projects.locations.customClasses.list",
                  "path": "v2/{+parent}/customClasses",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/customClasses",
                  "httpMethod": "GET",
                  "description": "Lists CustomClasses."
                },
                "create": {
                  "id": "speech.projects.locations.customClasses.create",
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "validateOnly": {
                      "location": "query",
                      "type": "boolean",
                      "description": "If set, validate the request and preview the CustomClass, but do not actually create it."
                    },
                    "parent": {
                      "location": "path",
                      "pattern": "^projects/[^/]+/locations/[^/]+$",
                      "type": "string",
                      "description": "Required. The project and location where this CustomClass will be created. The expected format is `projects/{project}/locations/{location}`.",
                      "required": true
                    },
                    "customClassId": {
                      "description": "The ID to use for the CustomClass, which will become the final component of the CustomClass's resource name. This value should be 4-63 characters, and valid characters are /a-z-/.",
                      "type": "string",
                      "location": "query"
                    }
                  },
                  "parameterOrder": [
                    "parent"
                  ],
                  "description": "Creates a CustomClass.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/customClasses",
                  "httpMethod": "POST",
                  "path": "v2/{+parent}/customClasses",
                  "request": {
                    "$ref": "CustomClass"
                  }
                },
                "delete": {
                  "id": "speech.projects.locations.customClasses.delete",
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "name": {
                      "required": true,
                      "description": "Required. The name of the CustomClass to delete. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}`",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+/customClasses/[^/]+$",
                      "location": "path"
                    },
                    "validateOnly": {
                      "description": "If set, validate the request and preview the deleted CustomClass, but do not actually delete it.",
                      "type": "boolean",
                      "location": "query"
                    },
                    "allowMissing": {
                      "description": "If set to true, and the CustomClass is not found, the request will succeed and be a no-op (no Operation is recorded in this case).",
                      "type": "boolean",
                      "location": "query"
                    },
                    "etag": {
                      "location": "query",
                      "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
                      "type": "string"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "description": "Deletes the CustomClass.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/customClasses/{customClassesId}",
                  "httpMethod": "DELETE",
                  "path": "v2/{+name}"
                }
              }
            },
            "config": {
              "methods": {
                "get": {
                  "parameters": {
                    "name": {
                      "required": true,
                      "pattern": "^projects/[^/]+/locations/[^/]+/config$",
                      "location": "path",
                      "description": "Required. The name of the config to retrieve. There is exactly one config resource per project per location. The expected format is `projects/{project}/locations/{location}/config`.",
                      "type": "string"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "response": {
                    "$ref": "Config"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "id": "speech.projects.locations.config.get",
                  "path": "v2/{+name}",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/config",
                  "httpMethod": "GET",
                  "description": "Returns the requested Config."
                },
                "update": {
                  "response": {
                    "$ref": "Config"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "name": {
                      "required": true,
                      "description": "Output only. Identifier. The name of the config resource. There is exactly one config resource per project per location. The expected format is `projects/{project}/locations/{location}/config`.",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+/config$",
                      "location": "path"
                    },
                    "updateMask": {
                      "type": "string",
                      "format": "google-fieldmask",
                      "description": "The list of fields to be updated.",
                      "location": "query"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "id": "speech.projects.locations.config.update",
                  "path": "v2/{+name}",
                  "request": {
                    "$ref": "Config"
                  },
                  "description": "Updates the Config.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/config",
                  "httpMethod": "PATCH"
                }
              }
            },
            "phraseSets": {
              "methods": {
                "undelete": {
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/phraseSets/{phraseSetsId}:undelete",
                  "httpMethod": "POST",
                  "description": "Undeletes the PhraseSet.",
                  "request": {
                    "$ref": "UndeletePhraseSetRequest"
                  },
                  "path": "v2/{+name}:undelete",
                  "id": "speech.projects.locations.phraseSets.undelete",
                  "parameters": {
                    "name": {
                      "required": true,
                      "description": "Required. The name of the PhraseSet to undelete. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+/phraseSets/[^/]+$",
                      "location": "path"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ]
                },
                "list": {
                  "path": "v2/{+parent}/phraseSets",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/phraseSets",
                  "httpMethod": "GET",
                  "description": "Lists PhraseSets.",
                  "parameters": {
                    "showDeleted": {
                      "location": "query",
                      "type": "boolean",
                      "description": "Whether or not to show resources that have been deleted."
                    },
                    "parent": {
                      "required": true,
                      "description": "Required. The project and location of PhraseSet resources to list. The expected format is `projects/{project}/locations/{location}`.",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+$",
                      "location": "path"
                    },
                    "pageToken": {
                      "location": "query",
                      "description": "A page token, received from a previous ListPhraseSets call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to ListPhraseSets must match the call that provided the page token.",
                      "type": "string"
                    },
                    "pageSize": {
                      "type": "integer",
                      "format": "int32",
                      "description": "The maximum number of PhraseSets to return. The service may return fewer than this value. If unspecified, at most 5 PhraseSets will be returned. The maximum value is 100; values above 100 will be coerced to 100.",
                      "location": "query"
                    }
                  },
                  "parameterOrder": [
                    "parent"
                  ],
                  "response": {
                    "$ref": "ListPhraseSetsResponse"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "id": "speech.projects.locations.phraseSets.list"
                },
                "create": {
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "validateOnly": {
                      "location": "query",
                      "description": "If set, validate the request and preview the PhraseSet, but do not actually create it.",
                      "type": "boolean"
                    },
                    "parent": {
                      "description": "Required. The project and location where this PhraseSet will be created. The expected format is `projects/{project}/locations/{location}`.",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+$",
                      "location": "path",
                      "required": true
                    },
                    "phraseSetId": {
                      "description": "The ID to use for the PhraseSet, which will become the final component of the PhraseSet's resource name. This value should be 4-63 characters, and valid characters are /a-z-/.",
                      "type": "string",
                      "location": "query"
                    }
                  },
                  "parameterOrder": [
                    "parent"
                  ],
                  "id": "speech.projects.locations.phraseSets.create",
                  "path": "v2/{+parent}/phraseSets",
                  "request": {
                    "$ref": "PhraseSet"
                  },
                  "description": "Creates a PhraseSet.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/phraseSets",
                  "httpMethod": "POST"
                },
                "delete": {
                  "id": "speech.projects.locations.phraseSets.delete",
                  "parameters": {
                    "name": {
                      "pattern": "^projects/[^/]+/locations/[^/]+/phraseSets/[^/]+$",
                      "location": "path",
                      "description": "Required. The name of the PhraseSet to delete. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`",
                      "type": "string",
                      "required": true
                    },
                    "validateOnly": {
                      "location": "query",
                      "description": "If set, validate the request and preview the deleted PhraseSet, but do not actually delete it.",
                      "type": "boolean"
                    },
                    "allowMissing": {
                      "type": "boolean",
                      "description": "If set to true, and the PhraseSet is not found, the request will succeed and be a no-op (no Operation is recorded in this case).",
                      "location": "query"
                    },
                    "etag": {
                      "type": "string",
                      "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
                      "location": "query"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/phraseSets/{phraseSetsId}",
                  "httpMethod": "DELETE",
                  "description": "Deletes the PhraseSet.",
                  "path": "v2/{+name}"
                },
                "get": {
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/phraseSets/{phraseSetsId}",
                  "httpMethod": "GET",
                  "description": "Returns the requested PhraseSet.",
                  "path": "v2/{+name}",
                  "id": "speech.projects.locations.phraseSets.get",
                  "parameters": {
                    "name": {
                      "pattern": "^projects/[^/]+/locations/[^/]+/phraseSets/[^/]+$",
                      "location": "path",
                      "description": "Required. The name of the PhraseSet to retrieve. The expected format is `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.",
                      "type": "string",
                      "required": true
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "response": {
                    "$ref": "PhraseSet"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ]
                },
                "patch": {
                  "id": "speech.projects.locations.phraseSets.patch",
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "name": {
                      "description": "Output only. Identifier. The resource name of the PhraseSet. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+/phraseSets/[^/]+$",
                      "location": "path",
                      "required": true
                    },
                    "validateOnly": {
                      "type": "boolean",
                      "description": "If set, validate the request and preview the updated PhraseSet, but do not actually update it.",
                      "location": "query"
                    },
                    "updateMask": {
                      "location": "query",
                      "type": "string",
                      "format": "google-fieldmask",
                      "description": "The list of fields to update. If empty, all non-default valued fields are considered for update. Use `*` to update the entire PhraseSet resource."
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "description": "Updates the PhraseSet.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/phraseSets/{phraseSetsId}",
                  "httpMethod": "PATCH",
                  "path": "v2/{+name}",
                  "request": {
                    "$ref": "PhraseSet"
                  }
                }
              }
            },
            "operations": {
              "methods": {
                "get": {
                  "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}",
                  "httpMethod": "GET",
                  "path": "v2/{+name}",
                  "id": "speech.projects.locations.operations.get",
                  "response": {
                    "$ref": "Operation"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "parameters": {
                    "name": {
                      "required": true,
                      "description": "The name of the operation resource.",
                      "type": "string",
                      "pattern": "^projects/[^/]+/locations/[^/]+/operations/[^/]+$",
                      "location": "path"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ]
                },
                "list": {
                  "id": "speech.projects.locations.operations.list",
                  "parameters": {
                    "pageSize": {
                      "location": "query",
                      "type": "integer",
                      "format": "int32",
                      "description": "The standard list page size."
                    },
                    "pageToken": {
                      "location": "query",
                      "description": "The standard list page token.",
                      "type": "string"
                    },
                    "returnPartialSuccess": {
                      "type": "boolean",
                      "description": "When set to `true`, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field. This can only be `true` when reading across collections. For example, when `parent` is set to `\"projects/example/locations/-\"`. This field is not supported by default and will result in an `UNIMPLEMENTED` error if set unless explicitly documented otherwise in service or product specific documentation.",
                      "location": "query"
                    },
                    "name": {
                      "required": true,
                      "type": "string",
                      "description": "The name of the operation's parent resource.",
                      "location": "path",
                      "pattern": "^projects/[^/]+/locations/[^/]+$"
                    },
                    "filter": {
                      "location": "query",
                      "description": "The standard list filter.",
                      "type": "string"
                    }
                  },
                  "parameterOrder": [
                    "name"
                  ],
                  "response": {
                    "$ref": "ListOperationsResponse"
                  },
                  "scopes": [
                    "https://www.googleapis.com/auth/cloud-platform"
                  ],
                  "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/operations",
                  "httpMethod": "GET",
                  "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.",
                  "path": "v2/{+name}/operations"
                }
              }
            }
          }
        }
      }
    }
  },
  "fullyEncodeReservedExpansion": true,
  "schemas": {
    "Status": {
      "id": "Status",
      "type": "object",
      "description": "The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).",
      "properties": {
        "message": {
          "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.",
          "type": "string"
        },
        "code": {
          "type": "integer",
          "format": "int32",
          "description": "The status code, which should be an enum value of google.rpc.Code."
        },
        "details": {
          "type": "array",
          "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.",
          "items": {
            "type": "object",
            "additionalProperties": {
              "description": "Properties of the object. Contains field @type with type URL.",
              "type": "any"
            }
          }
        }
      }
    },
    "UndeletePhraseSetRequest": {
      "description": "Request message for the UndeletePhraseSet method.",
      "id": "UndeletePhraseSetRequest",
      "type": "object",
      "properties": {
        "name": {
          "description": "Required. The name of the PhraseSet to undelete. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`",
          "type": "string"
        },
        "validateOnly": {
          "type": "boolean",
          "description": "If set, validate the request and preview the undeleted PhraseSet, but do not actually undelete it."
        },
        "etag": {
          "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
          "type": "string"
        }
      }
    },
    "CloudStorageResult": {
      "id": "CloudStorageResult",
      "type": "object",
      "description": "Final results written to Cloud Storage.",
      "properties": {
        "vttFormatUri": {
          "description": "The Cloud Storage URI to which recognition results were written as VTT formatted captions. This is populated only when `VTT` output is requested.",
          "type": "string"
        },
        "srtFormatUri": {
          "type": "string",
          "description": "The Cloud Storage URI to which recognition results were written as SRT formatted captions. This is populated only when `SRT` output is requested."
        },
        "uri": {
          "type": "string",
          "description": "The Cloud Storage URI to which recognition results were written."
        }
      }
    },
    "CreateRecognizerRequest": {
      "description": "Request message for the CreateRecognizer method.",
      "id": "CreateRecognizerRequest",
      "type": "object",
      "properties": {
        "parent": {
          "type": "string",
          "description": "Required. The project and location where this Recognizer will be created. The expected format is `projects/{project}/locations/{location}`."
        },
        "recognizer": {
          "$ref": "Recognizer",
          "description": "Required. The Recognizer to create."
        },
        "validateOnly": {
          "description": "If set, validate the request and preview the Recognizer, but do not actually create it.",
          "type": "boolean"
        },
        "recognizerId": {
          "type": "string",
          "description": "The ID to use for the Recognizer, which will become the final component of the Recognizer's resource name. This value should be 4-63 characters, and valid characters are /a-z-/."
        }
      }
    },
    "UndeleteCustomClassRequest": {
      "properties": {
        "name": {
          "type": "string",
          "description": "Required. The name of the CustomClass to undelete. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}`"
        },
        "validateOnly": {
          "type": "boolean",
          "description": "If set, validate the request and preview the undeleted CustomClass, but do not actually undelete it."
        },
        "etag": {
          "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
          "type": "string"
        }
      },
      "description": "Request message for the UndeleteCustomClass method.",
      "id": "UndeleteCustomClassRequest",
      "type": "object"
    },
    "CustomClass": {
      "id": "CustomClass",
      "type": "object",
      "description": "CustomClass for biasing in speech recognition. Used to define a set of words or phrases that represents a common concept or theme likely to appear in your audio, for example a list of passenger ship names.",
      "properties": {
        "state": {
          "type": "string",
          "enum": [
            "STATE_UNSPECIFIED",
            "ACTIVE",
            "DELETED"
          ],
          "description": "Output only. The CustomClass lifecycle state.",
          "readOnly": true,
          "enumDescriptions": [
            "Unspecified state. This is only used/useful for distinguishing unset values.",
            "The normal and active state.",
            "This CustomClass has been deleted."
          ]
        },
        "reconciling": {
          "readOnly": true,
          "type": "boolean",
          "description": "Output only. Whether or not this CustomClass is in the process of being updated."
        },
        "updateTime": {
          "readOnly": true,
          "description": "Output only. The most recent time this resource was modified.",
          "type": "string",
          "format": "google-datetime"
        },
        "kmsKeyName": {
          "readOnly": true,
          "description": "Output only. The [KMS key name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with which the CustomClass is encrypted. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.",
          "type": "string"
        },
        "createTime": {
          "readOnly": true,
          "type": "string",
          "format": "google-datetime",
          "description": "Output only. Creation time."
        },
        "uid": {
          "type": "string",
          "description": "Output only. System-assigned unique identifier for the CustomClass.",
          "readOnly": true
        },
        "deleteTime": {
          "readOnly": true,
          "type": "string",
          "format": "google-datetime",
          "description": "Output only. The time at which this resource was requested for deletion."
        },
        "annotations": {
          "type": "object",
          "description": "Optional. Allows users to store small amounts of arbitrary data. Both the key and the value must be 63 characters or less each. At most 100 annotations.",
          "additionalProperties": {
            "type": "string"
          }
        },
        "etag": {
          "type": "string",
          "description": "Output only. This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
          "readOnly": true
        },
        "expireTime": {
          "type": "string",
          "format": "google-datetime",
          "description": "Output only. The time at which this resource will be purged.",
          "readOnly": true
        },
        "displayName": {
          "type": "string",
          "description": "Optional. User-settable, human-readable name for the CustomClass. Must be 63 characters or less."
        },
        "items": {
          "type": "array",
          "description": "A collection of class items.",
          "items": {
            "$ref": "ClassItem"
          }
        },
        "name": {
          "readOnly": true,
          "description": "Output only. Identifier. The resource name of the CustomClass. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}`.",
          "type": "string"
        },
        "kmsKeyVersionName": {
          "readOnly": true,
          "type": "string",
          "description": "Output only. The [KMS key version name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions) with which the CustomClass is encrypted. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}`."
        }
      }
    },
    "StreamingRecognitionResult": {
      "properties": {
        "resultEndOffset": {
          "description": "Time offset of the end of this result relative to the beginning of the audio.",
          "type": "string",
          "format": "google-duration"
        },
        "alternatives": {
          "type": "array",
          "description": "May contain one or more recognition hypotheses. These alternatives are ordered in terms of accuracy, with the top (first) alternative being the most probable, as ranked by the recognizer.",
          "items": {
            "$ref": "SpeechRecognitionAlternative"
          }
        },
        "channelTag": {
          "type": "integer",
          "format": "int32",
          "description": "For multi-channel audio, this is the channel number corresponding to the recognized result for the audio from that channel. For `audio_channel_count` = `N`, its output values can range from `1` to `N`."
        },
        "isFinal": {
          "type": "boolean",
          "description": "If `false`, this StreamingRecognitionResult represents an interim result that may change. If `true`, this is the final time the speech service will return this particular StreamingRecognitionResult, the recognizer will not return any further hypotheses for this portion of the transcript and corresponding audio."
        },
        "stability": {
          "type": "number",
          "format": "float",
          "description": "An estimate of the likelihood that the recognizer will not change its guess about this interim result. Values range from 0.0 (completely unstable) to 1.0 (completely stable). This field is only provided for interim results (is_final=`false`). The default of 0.0 is a sentinel value indicating `stability` was not set."
        },
        "languageCode": {
          "readOnly": true,
          "type": "string",
          "description": "Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the language in this result. This language code was detected to have the most likelihood of being spoken in the audio."
        }
      },
      "description": "A streaming speech recognition result corresponding to a portion of the audio that is currently being processed.",
      "id": "StreamingRecognitionResult",
      "type": "object"
    },
    "BatchRecognizeRequest": {
      "id": "BatchRecognizeRequest",
      "type": "object",
      "description": "Request message for the BatchRecognize method.",
      "properties": {
        "config": {
          "$ref": "RecognitionConfig",
          "description": "Features and audio metadata to use for the Automatic Speech Recognition. This field in combination with the config_mask field can be used to override parts of the default_recognition_config of the Recognizer resource."
        },
        "processingStrategy": {
          "enumDescriptions": [
            "Default value for the processing strategy. The request is processed as soon as it is received.",
            "If selected, processes the request during lower utilization periods for a price discount. The request is fulfilled within 24 hours."
          ],
          "description": "Processing strategy to use for this request.",
          "type": "string",
          "enum": [
            "PROCESSING_STRATEGY_UNSPECIFIED",
            "DYNAMIC_BATCHING"
          ]
        },
        "configMask": {
          "type": "string",
          "format": "google-fieldmask",
          "description": "The list of fields in config that override the values in the default_recognition_config of the recognizer during this recognition request. If no mask is provided, all given fields in config override the values in the recognizer for this recognition request. If a mask is provided, only the fields listed in the mask override the config in the recognizer for this recognition request. If a wildcard (`*`) is provided, config completely overrides and replaces the config in the recognizer for this recognition request."
        },
        "recognitionOutputConfig": {
          "description": "Configuration options for where to output the transcripts of each file.",
          "$ref": "RecognitionOutputConfig"
        },
        "files": {
          "type": "array",
          "description": "Audio files with file metadata for ASR. The maximum number of files allowed to be specified is 15.",
          "items": {
            "$ref": "BatchRecognizeFileMetadata"
          }
        },
        "recognizer": {
          "type": "string",
          "description": "Required. The name of the Recognizer to use during recognition. The expected format is `projects/{project}/locations/{location}/recognizers/{recognizer}`. The {recognizer} segment may be set to `_` to use an empty implicit Recognizer."
        }
      }
    },
    "SpeechAdaptation": {
      "description": "Provides \"hints\" to the speech recognizer to favor specific words and phrases in the results. PhraseSets can be specified as an inline resource, or a reference to an existing PhraseSet resource.",
      "id": "SpeechAdaptation",
      "type": "object",
      "properties": {
        "phraseSets": {
          "type": "array",
          "description": "A list of inline or referenced PhraseSets.",
          "items": {
            "$ref": "AdaptationPhraseSet"
          }
        },
        "customClasses": {
          "type": "array",
          "description": "A list of inline CustomClasses. Existing CustomClass resources can be referenced directly in a PhraseSet.",
          "items": {
            "$ref": "CustomClass"
          }
        }
      }
    },
    "DenoiserConfig": {
      "properties": {
        "snrThreshold": {
          "description": "Signal-to-Noise Ratio (SNR) threshold for the denoiser. Here SNR means the loudness of the speech signal. Audio with an SNR below this threshold, meaning the speech is too quiet, will be prevented from being sent to the transcription model. If snr_threshold=0, no filtering will be applied.",
          "type": "number",
          "format": "float"
        },
        "denoiseAudio": {
          "type": "boolean",
          "description": "Denoise audio before sending to the transcription model."
        }
      },
      "id": "DenoiserConfig",
      "type": "object",
      "description": "Denoiser config. May not be supported for all models and may have no effect."
    },
    "Phrase": {
      "properties": {
        "value": {
          "type": "string",
          "description": "The phrase itself."
        },
        "boost": {
          "type": "number",
          "format": "float",
          "description": "Hint Boost. Overrides the boost set at the phrase set level. Positive value will increase the probability that a specific phrase will be recognized over other similar sounding phrases. The higher the boost, the higher the chance of false positive recognition as well. Negative boost values would correspond to anti-biasing. Anti-biasing is not enabled, so negative boost values will return an error. Boost values must be between 0 and 20. Any values outside that range will return an error. We recommend using a binary search approach to finding the optimal value for your use case as well as adding phrases both with and without boost to your requests."
        }
      },
      "id": "Phrase",
      "type": "object",
      "description": "A Phrase contains words and phrase \"hints\" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer. List items can also include CustomClass references containing groups of words that represent common concepts that occur in natural language."
    },
    "PhraseSet": {
      "properties": {
        "boost": {
          "type": "number",
          "format": "float",
          "description": "Hint Boost. Positive value will increase the probability that a specific phrase will be recognized over other similar sounding phrases. The higher the boost, the higher the chance of false positive recognition as well. Valid `boost` values are between 0 (exclusive) and 20. We recommend using a binary search approach to find the optimal value for your use case as well as adding phrases both with and without boost to your requests."
        },
        "name": {
          "type": "string",
          "description": "Output only. Identifier. The resource name of the PhraseSet. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`.",
          "readOnly": true
        },
        "kmsKeyVersionName": {
          "readOnly": true,
          "type": "string",
          "description": "Output only. The [KMS key version name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions) with which the PhraseSet is encrypted. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}`."
        },
        "annotations": {
          "description": "Allows users to store small amounts of arbitrary data. Both the key and the value must be 63 characters or less each. At most 100 annotations.",
          "additionalProperties": {
            "type": "string"
          },
          "type": "object"
        },
        "etag": {
          "description": "Output only. This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
          "type": "string",
          "readOnly": true
        },
        "uid": {
          "type": "string",
          "description": "Output only. System-assigned unique identifier for the PhraseSet.",
          "readOnly": true
        },
        "deleteTime": {
          "readOnly": true,
          "description": "Output only. The time at which this resource was requested for deletion.",
          "type": "string",
          "format": "google-datetime"
        },
        "phrases": {
          "type": "array",
          "description": "A list of words and phrases.",
          "items": {
            "$ref": "Phrase"
          }
        },
        "expireTime": {
          "type": "string",
          "format": "google-datetime",
          "description": "Output only. The time at which this resource will be purged.",
          "readOnly": true
        },
        "displayName": {
          "type": "string",
          "description": "User-settable, human-readable name for the PhraseSet. Must be 63 characters or less."
        },
        "state": {
          "type": "string",
          "enum": [
            "STATE_UNSPECIFIED",
            "ACTIVE",
            "DELETED"
          ],
          "description": "Output only. The PhraseSet lifecycle state.",
          "readOnly": true,
          "enumDescriptions": [
            "Unspecified state. This is only used/useful for distinguishing unset values.",
            "The normal and active state.",
            "This PhraseSet has been deleted."
          ]
        },
        "reconciling": {
          "readOnly": true,
          "description": "Output only. Whether or not this PhraseSet is in the process of being updated.",
          "type": "boolean"
        },
        "kmsKeyName": {
          "description": "Output only. The [KMS key name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with which the PhraseSet is encrypted. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.",
          "type": "string",
          "readOnly": true
        },
        "createTime": {
          "type": "string",
          "format": "google-datetime",
          "description": "Output only. Creation time.",
          "readOnly": true
        },
        "updateTime": {
          "description": "Output only. The most recent time this resource was modified.",
          "type": "string",
          "format": "google-datetime",
          "readOnly": true
        }
      },
      "id": "PhraseSet",
      "type": "object",
      "description": "PhraseSet for biasing in speech recognition. A PhraseSet is used to provide \"hints\" to the speech recognizer to favor specific words and phrases in the results."
    },
    "Operation": {
      "description": "This resource represents a long-running operation that is the result of a network API call.",
      "id": "Operation",
      "type": "object",
      "properties": {
        "name": {
          "type": "string",
          "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`."
        },
        "done": {
          "type": "boolean",
          "description": "If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available."
        },
        "error": {
          "description": "The error result of the operation in case of failure or cancellation.",
          "$ref": "Status"
        },
        "metadata": {
          "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.",
          "additionalProperties": {
            "description": "Properties of the object. Contains field @type with type URL.",
            "type": "any"
          },
          "type": "object"
        },
        "response": {
          "description": "The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.",
          "additionalProperties": {
            "description": "Properties of the object. Contains field @type with type URL.",
            "type": "any"
          },
          "type": "object"
        }
      }
    },
    "SpeechRecognitionAlternative": {
      "properties": {
        "words": {
          "description": "A list of word-specific information for each recognized word. When the SpeakerDiarizationConfig is set, you will see all the words from the beginning of the audio.",
          "items": {
            "$ref": "WordInfo"
          },
          "type": "array"
        },
        "transcript": {
          "type": "string",
          "description": "Transcript text representing the words that the user spoke."
        },
        "confidence": {
          "type": "number",
          "format": "float",
          "description": "The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative of a non-streaming result, or of a streaming result where is_final is set to `true`. This field is not guaranteed to be accurate and users should not rely on it to be always provided. The default of 0.0 is a sentinel value indicating `confidence` was not set."
        }
      },
      "id": "SpeechRecognitionAlternative",
      "type": "object",
      "description": "Alternative hypotheses (a.k.a. n-best list)."
    },
    "SrtOutputFileFormatConfig": {
      "id": "SrtOutputFileFormatConfig",
      "type": "object",
      "description": "Output configuration for a [SubRip Text](https://www.matroska.org/technical/subtitles.html#srt-subtitles) formatted subtitle file.",
      "properties": {}
    },
    "ExplicitDecodingConfig": {
      "id": "ExplicitDecodingConfig",
      "type": "object",
      "description": "Explicitly specified decoding parameters.",
      "properties": {
        "sampleRateHertz": {
          "type": "integer",
          "format": "int32",
          "description": "Optional. Sample rate in Hertz of the audio data sent for recognition. Valid values are: 8000-48000, and 16000 is optimal. For best results, set the sampling rate of the audio source to 16000 Hz. If that's not possible, use the native sample rate of the audio source (instead of resampling). Note that this field is marked as OPTIONAL for backward compatibility reasons. It is (and has always been) effectively REQUIRED."
        },
        "encoding": {
          "type": "string",
          "enum": [
            "AUDIO_ENCODING_UNSPECIFIED",
            "LINEAR16",
            "MULAW",
            "ALAW",
            "AMR",
            "AMR_WB",
            "FLAC",
            "MP3",
            "OGG_OPUS",
            "WEBM_OPUS",
            "MP4_AAC",
            "M4A_AAC",
            "MOV_AAC"
          ],
          "description": "Required. Encoding of the audio data sent for recognition.",
          "enumDescriptions": [
            "Default value. This value is unused.",
            "Headerless 16-bit signed little-endian PCM samples.",
            "Headerless 8-bit companded mulaw samples.",
            "Headerless 8-bit companded alaw samples.",
            "AMR frames with an rfc4867.5 header.",
            "AMR-WB frames with an rfc4867.5 header.",
            "FLAC frames in the \"native FLAC\" container format.",
            "MPEG audio frames with optional (ignored) ID3 metadata.",
            "Opus audio frames in an Ogg container.",
            "Opus audio frames in a WebM container.",
            "AAC audio frames in an MP4 container.",
            "AAC audio frames in an M4A container.",
            "AAC audio frames in an MOV container."
          ]
        },
        "audioChannelCount": {
          "type": "integer",
          "format": "int32",
          "description": "Optional. Number of channels present in the audio data sent for recognition. Note that this field is marked as OPTIONAL for backward compatibility reasons. It is (and has always been) effectively REQUIRED. The maximum allowed value is 8."
        }
      }
    },
    "WordInfo": {
      "description": "Word-specific information for recognized words.",
      "id": "WordInfo",
      "type": "object",
      "properties": {
        "word": {
          "description": "The word corresponding to this set of information.",
          "type": "string"
        },
        "startOffset": {
          "type": "string",
          "format": "google-duration",
          "description": "Time offset relative to the beginning of the audio, and corresponding to the start of the spoken word. This field is only set if enable_word_time_offsets is `true` and only in the top hypothesis. This is an experimental feature and the accuracy of the time offset can vary."
        },
        "confidence": {
          "description": "The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative of a non-streaming result, or of a streaming result where is_final is set to `true`. This field is not guaranteed to be accurate and users should not rely on it to be always provided. The default of 0.0 is a sentinel value indicating `confidence` was not set.",
          "type": "number",
          "format": "float"
        },
        "speakerLabel": {
          "type": "string",
          "description": "A distinct label is assigned for every speaker within the audio. This field specifies which one of those speakers was detected to have spoken this word. `speaker_label` is set if SpeakerDiarizationConfig is given and only in the top alternative."
        },
        "endOffset": {
          "type": "string",
          "format": "google-duration",
          "description": "Time offset relative to the beginning of the audio, and corresponding to the end of the spoken word. This field is only set if enable_word_time_offsets is `true` and only in the top hypothesis. This is an experimental feature and the accuracy of the time offset can vary."
        }
      }
    },
    "LocationsMetadata": {
      "description": "Main metadata for the Locations API for STT V2. Currently this is just the metadata about locales, models, and features.",
      "id": "LocationsMetadata",
      "type": "object",
      "properties": {
        "accessMetadata": {
          "$ref": "AccessMetadata",
          "description": "Information about access metadata for the region and given project."
        },
        "languages": {
          "$ref": "LanguageMetadata",
          "description": "Information about available locales, models, and features represented in the hierarchical structure of locales -\u003e models -\u003e features"
        }
      }
    },
    "Location": {
      "id": "Location",
      "type": "object",
      "description": "A resource that represents a Google Cloud location.",
      "properties": {
        "name": {
          "type": "string",
          "description": "Resource name for the location, which may vary between implementations. For example: `\"projects/example-project/locations/us-east1\"`"
        },
        "displayName": {
          "description": "The friendly name for this location, typically a nearby city name. For example, \"Tokyo\".",
          "type": "string"
        },
        "locationId": {
          "description": "The canonical id for this location. For example: `\"us-east1\"`.",
          "type": "string"
        },
        "labels": {
          "type": "object",
          "description": "Cross-service attributes for the location. For example {\"cloud.googleapis.com/region\": \"us-east1\"}",
          "additionalProperties": {
            "type": "string"
          }
        },
        "metadata": {
          "description": "Service-specific metadata. For example the available capacity at the given location.",
          "additionalProperties": {
            "description": "Properties of the object. Contains field @type with type URL.",
            "type": "any"
          },
          "type": "object"
        }
      }
    },
    "ModelFeature": {
      "properties": {
        "releaseState": {
          "description": "The release state of the feature",
          "type": "string"
        },
        "feature": {
          "description": "The name of the feature (Note: the feature can be `recognizer`)",
          "type": "string"
        }
      },
      "id": "ModelFeature",
      "type": "object",
      "description": "Represents a singular feature of a model. If the feature is `recognizer`, the release_state of the feature represents the release_state of the model."
    },
    "AutoDetectDecodingConfig": {
      "description": "Automatically detected decoding parameters. Supported for the following encodings: * WAV_LINEAR16: 16-bit signed little-endian PCM samples in a WAV container. * WAV_MULAW: 8-bit companded mulaw samples in a WAV container. * WAV_ALAW: 8-bit companded alaw samples in a WAV container. * RFC4867_5_AMR: AMR frames with an rfc4867.5 header. * RFC4867_5_AMRWB: AMR-WB frames with an rfc4867.5 header. * FLAC: FLAC frames in the \"native FLAC\" container format. * MP3: MPEG audio frames with optional (ignored) ID3 metadata. * OGG_OPUS: Opus audio frames in an Ogg container. * WEBM_OPUS: Opus audio frames in a WebM container. * MP4_AAC: AAC audio frames in an MP4 container. * M4A_AAC: AAC audio frames in an M4A container. * MOV_AAC: AAC audio frames in an MOV container.",
      "id": "AutoDetectDecodingConfig",
      "type": "object",
      "properties": {}
    },
    "RecognizeRequest": {
      "id": "RecognizeRequest",
      "type": "object",
      "description": "Request message for the Recognize method. Either `content` or `uri` must be supplied. Supplying both or neither returns INVALID_ARGUMENT. See [content limits](https://cloud.google.com/speech-to-text/quotas#content).",
      "properties": {
        "configMask": {
          "type": "string",
          "format": "google-fieldmask",
          "description": "The list of fields in config that override the values in the default_recognition_config of the recognizer during this recognition request. If no mask is provided, all non-default valued fields in config override the values in the recognizer for this recognition request. If a mask is provided, only the fields listed in the mask override the config in the recognizer for this recognition request. If a wildcard (`*`) is provided, config completely overrides and replaces the config in the recognizer for this recognition request."
        },
        "content": {
          "description": "The audio data bytes encoded as specified in RecognitionConfig. As with all bytes fields, proto buffers use a pure binary representation, whereas JSON representations use base64.",
          "type": "string",
          "format": "byte"
        },
        "config": {
          "$ref": "RecognitionConfig",
          "description": "Features and audio metadata to use for the Automatic Speech Recognition. This field in combination with the config_mask field can be used to override parts of the default_recognition_config of the Recognizer resource."
        },
        "uri": {
          "type": "string",
          "description": "URI that points to a file that contains audio data bytes as specified in RecognitionConfig. The file must not be compressed (for example, gzip). Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: `gs://bucket_name/object_name` (other URI formats return INVALID_ARGUMENT). For more information, see [Request URIs](https://cloud.google.com/storage/docs/reference-uris)."
        }
      }
    },
    "InlineResult": {
      "properties": {
        "transcript": {
          "$ref": "BatchRecognizeResults",
          "description": "The transcript for the audio file."
        },
        "vttCaptions": {
          "type": "string",
          "description": "The transcript for the audio file as VTT formatted captions. This is populated only when `VTT` output is requested."
        },
        "srtCaptions": {
          "description": "The transcript for the audio file as SRT formatted captions. This is populated only when `SRT` output is requested.",
          "type": "string"
        }
      },
      "id": "InlineResult",
      "type": "object",
      "description": "Final results returned inline in the recognition response."
    },
    "AdaptationPhraseSet": {
      "properties": {
        "phraseSet": {
          "description": "The name of an existing PhraseSet resource. The user must have read access to the resource and it must not be deleted.",
          "type": "string"
        },
        "inlinePhraseSet": {
          "description": "An inline defined PhraseSet.",
          "$ref": "PhraseSet"
        }
      },
      "id": "AdaptationPhraseSet",
      "type": "object",
      "description": "A biasing PhraseSet, which can be either a string referencing the name of an existing PhraseSets resource, or an inline definition of a PhraseSet."
    },
    "RecognitionOutputConfig": {
      "description": "Configuration options for the output(s) of recognition.",
      "id": "RecognitionOutputConfig",
      "type": "object",
      "properties": {
        "inlineResponseConfig": {
          "description": "If this message is populated, recognition results are provided in the BatchRecognizeResponse message of the Operation when completed. This is only supported when calling BatchRecognize with just one audio file.",
          "$ref": "InlineOutputConfig"
        },
        "gcsOutputConfig": {
          "$ref": "GcsOutputConfig",
          "description": "If this message is populated, recognition results are written to the provided Google Cloud Storage URI."
        },
        "outputFormatConfig": {
          "$ref": "OutputFormatConfig",
          "description": "Optional. Configuration for the format of the results stored to `output`. If unspecified transcripts will be written in the `NATIVE` format only."
        }
      }
    },
    "UpdateConfigRequest": {
      "properties": {
        "updateMask": {
          "type": "string",
          "format": "google-fieldmask",
          "description": "The list of fields to be updated."
        },
        "config": {
          "$ref": "Config",
          "description": "Required. The config to update. The config's `name` field is used to identify the config to be updated. The expected format is `projects/{project}/locations/{location}/config`."
        }
      },
      "description": "Request message for the UpdateConfig method.",
      "id": "UpdateConfigRequest",
      "type": "object"
    },
    "ListLocationsResponse": {
      "properties": {
        "locations": {
          "type": "array",
          "description": "A list of locations that matches the specified filter in the request.",
          "items": {
            "$ref": "Location"
          }
        },
        "nextPageToken": {
          "description": "The standard List next-page token.",
          "type": "string"
        }
      },
      "id": "ListLocationsResponse",
      "type": "object",
      "description": "The response message for Locations.ListLocations."
    },
    "UpdatePhraseSetRequest": {
      "id": "UpdatePhraseSetRequest",
      "type": "object",
      "description": "Request message for the UpdatePhraseSet method.",
      "properties": {
        "phraseSet": {
          "$ref": "PhraseSet",
          "description": "Required. The PhraseSet to update. The PhraseSet's `name` field is used to identify the PhraseSet to update. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`."
        },
        "updateMask": {
          "description": "The list of fields to update. If empty, all non-default valued fields are considered for update. Use `*` to update the entire PhraseSet resource.",
          "type": "string",
          "format": "google-fieldmask"
        },
        "validateOnly": {
          "description": "If set, validate the request and preview the updated PhraseSet, but do not actually update it.",
          "type": "boolean"
        }
      }
    },
    "ListCustomClassesResponse": {
      "id": "ListCustomClassesResponse",
      "type": "object",
      "description": "Response message for the ListCustomClasses method.",
      "properties": {
        "customClasses": {
          "type": "array",
          "description": "The list of requested CustomClasses.",
          "items": {
            "$ref": "CustomClass"
          }
        },
        "nextPageToken": {
          "description": "A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. This token expires after 72 hours.",
          "type": "string"
        }
      }
    },
    "UndeleteRecognizerRequest": {
      "id": "UndeleteRecognizerRequest",
      "type": "object",
      "description": "Request message for the UndeleteRecognizer method.",
      "properties": {
        "name": {
          "type": "string",
          "description": "Required. The name of the Recognizer to undelete. Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`"
        },
        "validateOnly": {
          "type": "boolean",
          "description": "If set, validate the request and preview the undeleted Recognizer, but do not actually undelete it."
        },
        "etag": {
          "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
          "type": "string"
        }
      }
    },
    "DeleteRecognizerRequest": {
      "properties": {
        "name": {
          "type": "string",
          "description": "Required. The name of the Recognizer to delete. Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`"
        },
        "validateOnly": {
          "description": "If set, validate the request and preview the deleted Recognizer, but do not actually delete it.",
          "type": "boolean"
        },
        "allowMissing": {
          "description": "If set to true, and the Recognizer is not found, the request will succeed and be a no-op (no Operation is recorded in this case).",
          "type": "boolean"
        },
        "etag": {
          "type": "string",
          "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding."
        }
      },
      "id": "DeleteRecognizerRequest",
      "type": "object",
      "description": "Request message for the DeleteRecognizer method."
    },
    "ClassItem": {
      "properties": {
        "value": {
          "type": "string",
          "description": "The class item's value."
        }
      },
      "description": "An item of the class.",
      "id": "ClassItem",
      "type": "object"
    },
    "DeleteCustomClassRequest": {
      "properties": {
        "name": {
          "description": "Required. The name of the CustomClass to delete. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}`",
          "type": "string"
        },
        "validateOnly": {
          "type": "boolean",
          "description": "If set, validate the request and preview the deleted CustomClass, but do not actually delete it."
        },
        "allowMissing": {
          "type": "boolean",
          "description": "If set to true, and the CustomClass is not found, the request will succeed and be a no-op (no Operation is recorded in this case)."
        },
        "etag": {
          "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
          "type": "string"
        }
      },
      "id": "DeleteCustomClassRequest",
      "type": "object",
      "description": "Request message for the DeleteCustomClass method."
    },
    "RecognitionFeatures": {
      "properties": {
        "profanityFilter": {
          "type": "boolean",
          "description": "If set to `true`, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, for instance, \"f***\". If set to `false` or omitted, profanities won't be filtered out."
        },
        "customPromptConfig": {
          "description": "Optional. Configuration to enable custom prompt for chirp3.",
          "$ref": "CustomPromptConfig"
        },
        "diarizationConfig": {
          "description": "Configuration to enable speaker diarization. To enable diarization, set this field to an empty SpeakerDiarizationConfig message.",
          "$ref": "SpeakerDiarizationConfig"
        },
        "enableSpokenPunctuation": {
          "type": "boolean",
          "description": "The spoken punctuation behavior for the call. If `true`, replaces spoken punctuation with the corresponding symbols in the request. For example, \"how are you question mark\" becomes \"how are you?\". See https://cloud.google.com/speech-to-text/docs/spoken-punctuation for support. If `false`, spoken punctuation is not replaced."
        },
        "enableSpokenEmojis": {
          "description": "The spoken emoji behavior for the call. If `true`, adds spoken emoji formatting for the request. This will replace spoken emojis with the corresponding Unicode symbols in the final transcript. If `false`, spoken emojis are not replaced.",
          "type": "boolean"
        },
        "maxAlternatives": {
          "description": "Maximum number of recognition hypotheses to be returned. The server may return fewer than `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of one. If omitted, will return a maximum of one.",
          "type": "integer",
          "format": "int32"
        },
        "enableWordTimeOffsets": {
          "type": "boolean",
          "description": "If `true`, the top result includes a list of words and the start and end time offsets (timestamps) for those words. If `false`, no word-level time offset information is returned. The default is `false`."
        },
        "enableWordConfidence": {
          "type": "boolean",
          "description": "If `true`, the top result includes a list of words and the confidence for those words. If `false`, no word-level confidence information is returned. The default is `false`."
        },
        "enableAutomaticPunctuation": {
          "description": "If `true`, adds punctuation to recognition result hypotheses. This feature is only available in select languages. The default `false` value does not add punctuation to result hypotheses.",
          "type": "boolean"
        },
        "multiChannelMode": {
          "type": "string",
          "enum": [
            "MULTI_CHANNEL_MODE_UNSPECIFIED",
            "SEPARATE_RECOGNITION_PER_CHANNEL"
          ],
          "description": "Mode for recognizing multi-channel audio.",
          "enumDescriptions": [
            "Default value for the multi-channel mode. If the audio contains multiple channels, only the first channel will be transcribed; other channels will be ignored.",
            "If selected, each channel in the provided audio is transcribed independently. This cannot be selected if the selected model is `latest_short`."
          ]
        }
      },
      "description": "Available recognition features.",
      "id": "RecognitionFeatures",
      "type": "object"
    },
    "RecognizeResponse": {
      "description": "Response message for the Recognize method.",
      "id": "RecognizeResponse",
      "type": "object",
      "properties": {
        "metadata": {
          "description": "Metadata about the recognition.",
          "$ref": "RecognitionResponseMetadata"
        },
        "results": {
          "description": "Sequential list of transcription results corresponding to sequential portions of audio.",
          "items": {
            "$ref": "SpeechRecognitionResult"
          },
          "type": "array"
        }
      }
    },
    "BatchRecognizeTranscriptionMetadata": {
      "properties": {
        "uri": {
          "description": "The Cloud Storage URI to which recognition results will be written.",
          "type": "string"
        },
        "progressPercent": {
          "description": "How much of the file has been transcribed so far.",
          "type": "integer",
          "format": "int32"
        },
        "error": {
          "$ref": "Status",
          "description": "Error if one was encountered."
        }
      },
      "description": "Metadata about transcription for a single file (for example, progress percent).",
      "id": "BatchRecognizeTranscriptionMetadata",
      "type": "object"
    },
    "SpeechRecognitionResult": {
      "properties": {
        "alternatives": {
          "type": "array",
          "description": "May contain one or more recognition hypotheses. These alternatives are ordered in terms of accuracy, with the top (first) alternative being the most probable, as ranked by the recognizer.",
          "items": {
            "$ref": "SpeechRecognitionAlternative"
          }
        },
        "channelTag": {
          "type": "integer",
          "format": "int32",
          "description": "For multi-channel audio, this is the channel number corresponding to the recognized result for the audio from that channel. For `audio_channel_count` = `N`, its output values can range from `1` to `N`."
        },
        "resultEndOffset": {
          "type": "string",
          "format": "google-duration",
          "description": "Time offset of the end of this result relative to the beginning of the audio."
        },
        "languageCode": {
          "readOnly": true,
          "type": "string",
          "description": "Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the language in this result. This language code was detected to have the most likelihood of being spoken in the audio."
        }
      },
      "id": "SpeechRecognitionResult",
      "type": "object",
      "description": "A speech recognition result corresponding to a portion of the audio."
    },
    "CustomPromptConfig": {
      "id": "CustomPromptConfig",
      "type": "object",
      "description": "Configuration to enable custom prompt in chirp3.",
      "properties": {
        "customPrompt": {
          "description": "Optional. The custom instructions to override the existing instructions for chirp3.",
          "type": "string"
        }
      }
    },
    "ListPhraseSetsResponse": {
      "description": "Response message for the ListPhraseSets method.",
      "id": "ListPhraseSetsResponse",
      "type": "object",
      "properties": {
        "phraseSets": {
          "type": "array",
          "description": "The list of requested PhraseSets.",
          "items": {
            "$ref": "PhraseSet"
          }
        },
        "nextPageToken": {
          "type": "string",
          "description": "A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. This token expires after 72 hours."
        }
      }
    },
    "UpdateCustomClassRequest": {
      "id": "UpdateCustomClassRequest",
      "type": "object",
      "description": "Request message for the UpdateCustomClass method.",
      "properties": {
        "validateOnly": {
          "description": "If set, validate the request and preview the updated CustomClass, but do not actually update it.",
          "type": "boolean"
        },
        "customClass": {
          "description": "Required. The CustomClass to update. The CustomClass's `name` field is used to identify the CustomClass to update. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}`.",
          "$ref": "CustomClass"
        },
        "updateMask": {
          "type": "string",
          "format": "google-fieldmask",
          "description": "The list of fields to be updated. If empty, all fields are considered for update."
        }
      }
    },
    "GcsOutputConfig": {
      "description": "Output configurations for Cloud Storage.",
      "id": "GcsOutputConfig",
      "type": "object",
      "properties": {
        "uri": {
          "type": "string",
          "description": "The Cloud Storage URI prefix with which recognition results will be written."
        }
      }
    },
    "DeletePhraseSetRequest": {
      "properties": {
        "name": {
          "description": "Required. The name of the PhraseSet to delete. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}`",
          "type": "string"
        },
        "validateOnly": {
          "type": "boolean",
          "description": "If set, validate the request and preview the deleted PhraseSet, but do not actually delete it."
        },
        "allowMissing": {
          "description": "If set to true, and the PhraseSet is not found, the request will succeed and be a no-op (no Operation is recorded in this case).",
          "type": "boolean"
        },
        "etag": {
          "description": "This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
          "type": "string"
        }
      },
      "id": "DeletePhraseSetRequest",
      "type": "object",
      "description": "Request message for the DeletePhraseSet method."
    },
    "UpdateRecognizerRequest": {
      "id": "UpdateRecognizerRequest",
      "type": "object",
      "description": "Request message for the UpdateRecognizer method.",
      "properties": {
        "validateOnly": {
          "description": "If set, validate the request and preview the updated Recognizer, but do not actually update it.",
          "type": "boolean"
        },
        "updateMask": {
          "type": "string",
          "format": "google-fieldmask",
          "description": "The list of fields to update. If empty, all non-default valued fields are considered for update. Use `*` to update the entire Recognizer resource."
        },
        "recognizer": {
          "description": "Required. The Recognizer to update. The Recognizer's `name` field is used to identify the Recognizer to update. Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`.",
          "$ref": "Recognizer"
        }
      }
    },
    "LanguageMetadata": {
      "properties": {
        "models": {
          "type": "object",
          "description": "Map of locale (language code) -\u003e models",
          "additionalProperties": {
            "$ref": "ModelMetadata"
          }
        }
      },
      "description": "The metadata about locales available in a given region. Currently this is just the models that are available for each locale",
      "id": "LanguageMetadata",
      "type": "object"
    },
    "TranslationConfig": {
      "properties": {
        "targetLanguage": {
          "type": "string",
          "description": "Required. The language code to translate to."
        }
      },
      "description": "Translation configuration. Use to translate the given audio into text for the desired language.",
      "id": "TranslationConfig",
      "type": "object"
    },
    "ListOperationsResponse": {
      "description": "The response message for Operations.ListOperations.",
      "id": "ListOperationsResponse",
      "type": "object",
      "properties": {
        "nextPageToken": {
          "description": "The standard List next-page token.",
          "type": "string"
        },
        "operations": {
          "type": "array",
          "description": "A list of operations that matches the specified filter in the request.",
          "items": {
            "$ref": "Operation"
          }
        },
        "unreachable": {
          "description": "Unordered list. Unreachable resources. Populated when the request sets `ListOperationsRequest.return_partial_success` and reads across collections. For example, when attempting to list all resources across all supported locations.",
          "items": {
            "type": "string"
          },
          "type": "array"
        }
      }
    },
    "Recognizer": {
      "properties": {
        "defaultRecognitionConfig": {
          "description": "Default configuration to use for requests with this Recognizer. This can be overwritten by inline configuration in the RecognizeRequest.config field.",
          "$ref": "RecognitionConfig"
        },
        "name": {
          "readOnly": true,
          "type": "string",
          "description": "Output only. Identifier. The resource name of the Recognizer. Format: `projects/{project}/locations/{location}/recognizers/{recognizer}`."
        },
        "kmsKeyVersionName": {
          "readOnly": true,
          "type": "string",
          "description": "Output only. The [KMS key version name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions) with which the Recognizer is encrypted. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}`."
        },
        "languageCodes": {
          "description": "Optional. This field is now deprecated. Prefer the `language_codes` field in the `RecognitionConfig` message. The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Supported languages for each model are listed in the [Table of Supported Models](https://cloud.google.com/speech-to-text/v2/docs/speech-to-text-supported-languages). If additional languages are provided, recognition result will contain recognition in the most likely language detected. The recognition result will include the language tag of the language detected in the audio. When you create or update a Recognizer, these values are stored in normalized BCP-47 form. For example, \"en-us\" is stored as \"en-US\".",
          "deprecated": true,
          "items": {
            "type": "string"
          },
          "type": "array"
        },
        "updateTime": {
          "readOnly": true,
          "description": "Output only. The most recent time this Recognizer was modified.",
          "type": "string",
          "format": "google-datetime"
        },
        "kmsKeyName": {
          "readOnly": true,
          "type": "string",
          "description": "Output only. The [KMS key name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with which the Recognizer is encrypted. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`."
        },
        "model": {
          "description": "Optional. This field is now deprecated. Prefer the `model` field in the `RecognitionConfig` message. Which model to use for recognition requests. Select the model best suited to your domain to get best results. Guidance for choosing which model to use can be found in the [Transcription Models Documentation](https://cloud.google.com/speech-to-text/v2/docs/transcription-model) and the models supported in each region can be found in the [Table Of Supported Models](https://cloud.google.com/speech-to-text/v2/docs/speech-to-text-supported-languages).",
          "deprecated": true,
          "type": "string"
        },
        "createTime": {
          "readOnly": true,
          "description": "Output only. Creation time.",
          "type": "string",
          "format": "google-datetime"
        },
        "state": {
          "type": "string",
          "enum": [
            "STATE_UNSPECIFIED",
            "ACTIVE",
            "DELETED"
          ],
          "description": "Output only. The Recognizer lifecycle state.",
          "readOnly": true,
          "enumDescriptions": [
            "The default value. This value is used if the state is omitted.",
            "The Recognizer is active and ready for use.",
            "This Recognizer has been deleted."
          ]
        },
        "reconciling": {
          "readOnly": true,
          "description": "Output only. Whether or not this Recognizer is in the process of being updated.",
          "type": "boolean"
        },
        "expireTime": {
          "type": "string",
          "format": "google-datetime",
          "description": "Output only. The time at which this Recognizer will be purged.",
          "readOnly": true
        },
        "displayName": {
          "type": "string",
          "description": "User-settable, human-readable name for the Recognizer. Must be 63 characters or less."
        },
        "uid": {
          "description": "Output only. System-assigned unique identifier for the Recognizer.",
          "type": "string",
          "readOnly": true
        },
        "deleteTime": {
          "readOnly": true,
          "description": "Output only. The time at which this Recognizer was requested for deletion.",
          "type": "string",
          "format": "google-datetime"
        },
        "annotations": {
          "description": "Allows users to store small amounts of arbitrary data. Both the key and the value must be 63 characters or less each. At most 100 annotations.",
          "additionalProperties": {
            "type": "string"
          },
          "type": "object"
        },
        "etag": {
          "readOnly": true,
          "description": "Output only. This checksum is computed by the server based on the value of other fields. This may be sent on update, undelete, and delete requests to ensure the client has an up-to-date value before proceeding.",
          "type": "string"
        }
      },
      "description": "A Recognizer message. Stores recognition configuration and metadata.",
      "id": "Recognizer",
      "type": "object"
    },
    "BatchRecognizeFileResult": {
      "properties": {
        "inlineResult": {
          "description": "Recognition results. This is populated only when InlineOutputConfig is set in the RecognitionOutputConfig.",
          "$ref": "InlineResult"
        },
        "uri": {
          "type": "string",
          "description": "Deprecated. Use `cloud_storage_result.native_format_uri` instead.",
          "deprecated": true
        },
        "metadata": {
          "$ref": "RecognitionResponseMetadata"
        },
        "transcript": {
          "description": "Deprecated. Use `inline_result.transcript` instead.",
          "deprecated": true,
          "$ref": "BatchRecognizeResults"
        },
        "error": {
          "$ref": "Status",
          "description": "Error if one was encountered."
        },
        "cloudStorageResult": {
          "$ref": "CloudStorageResult",
          "description": "Recognition results written to Cloud Storage. This is populated only when GcsOutputConfig is set in the RecognitionOutputConfig."
        }
      },
      "description": "Final results for a single file.",
      "id": "BatchRecognizeFileResult",
      "type": "object"
    },
    "CreatePhraseSetRequest": {
      "description": "Request message for the CreatePhraseSet method.",
      "id": "CreatePhraseSetRequest",
      "type": "object",
      "properties": {
        "phraseSet": {
          "$ref": "PhraseSet",
          "description": "Required. The PhraseSet to create."
        },
        "phraseSetId": {
          "description": "The ID to use for the PhraseSet, which will become the final component of the PhraseSet's resource name. This value should be 4-63 characters, and valid characters are /a-z-/.",
          "type": "string"
        },
        "parent": {
          "description": "Required. The project and location where this PhraseSet will be created. The expected format is `projects/{project}/locations/{location}`.",
          "type": "string"
        },
        "validateOnly": {
          "type": "boolean",
          "description": "If set, validate the request and preview the PhraseSet, but do not actually create it."
        }
      }
    },
    "ModelFeatures": {
      "id": "ModelFeatures",
      "type": "object",
      "description": "Represents the collection of features belonging to a model",
      "properties": {
        "modelFeature": {
          "description": "Repeated field that contains all features of the model",
          "items": {
            "$ref": "ModelFeature"
          },
          "type": "array"
        }
      }
    },
    "RecognitionConfig": {
      "id": "RecognitionConfig",
      "type": "object",
      "description": "Provides information to the Recognizer that specifies how to process the recognition request.",
      "properties": {
        "explicitDecodingConfig": {
          "description": "Explicitly specified decoding parameters. Required if using headerless PCM audio (linear16, mulaw, alaw).",
          "$ref": "ExplicitDecodingConfig"
        },
        "languageCodes": {
          "description": "Optional. The language of the supplied audio as a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. Language tags are normalized to BCP-47 before they are used eg \"en-us\" becomes \"en-US\". Supported languages for each model are listed in the [Table of Supported Models](https://cloud.google.com/speech-to-text/v2/docs/speech-to-text-supported-languages). If additional languages are provided, recognition result will contain recognition in the most likely language detected. The recognition result will include the language tag of the language detected in the audio.",
          "items": {
            "type": "string"
          },
          "type": "array"
        },
        "adaptation": {
          "description": "Speech adaptation context that weights recognizer predictions for specific words and phrases.",
          "$ref": "SpeechAdaptation"
        },
        "denoiserConfig": {
          "description": "Optional. Optional denoiser config. May not be supported for all models and may have no effect.",
          "$ref": "DenoiserConfig"
        },
        "translationConfig": {
          "$ref": "TranslationConfig",
          "description": "Optional. Optional configuration used to automatically run translation on the given audio to the desired language for supported models."
        },
        "model": {
          "description": "Optional. Which model to use for recognition requests. Select the model best suited to your domain to get best results. Guidance for choosing which model to use can be found in the [Transcription Models Documentation](https://cloud.google.com/speech-to-text/v2/docs/transcription-model) and the models supported in each region can be found in the [Table Of Supported Models](https://cloud.google.com/speech-to-text/v2/docs/speech-to-text-supported-languages).",
          "type": "string"
        },
        "autoDecodingConfig": {
          "description": "Automatically detect decoding parameters. Preferred for supported formats.",
          "$ref": "AutoDetectDecodingConfig"
        },
        "features": {
          "description": "Speech recognition features to enable.",
          "$ref": "RecognitionFeatures"
        },
        "transcriptNormalization": {
          "$ref": "TranscriptNormalization",
          "description": "Optional. Use transcription normalization to automatically replace parts of the transcript with phrases of your choosing. For StreamingRecognize, this normalization only applies to stable partial transcripts (stability \u003e 0.8) and final transcripts."
        }
      }
    },
    "CreateCustomClassRequest": {
      "id": "CreateCustomClassRequest",
      "type": "object",
      "description": "Request message for the CreateCustomClass method.",
      "properties": {
        "customClass": {
          "$ref": "CustomClass",
          "description": "Required. The CustomClass to create."
        },
        "customClassId": {
          "type": "string",
          "description": "The ID to use for the CustomClass, which will become the final component of the CustomClass's resource name. This value should be 4-63 characters, and valid characters are /a-z-/."
        },
        "parent": {
          "type": "string",
          "description": "Required. The project and location where this CustomClass will be created. The expected format is `projects/{project}/locations/{location}`."
        },
        "validateOnly": {
          "description": "If set, validate the request and preview the CustomClass, but do not actually create it.",
          "type": "boolean"
        }
      }
    },
    "Entry": {
      "description": "A single replacement configuration.",
      "id": "Entry",
      "type": "object",
      "properties": {
        "caseSensitive": {
          "type": "boolean",
          "description": "Whether the search is case sensitive."
        },
        "search": {
          "type": "string",
          "description": "What to replace. Max length is 100 characters."
        },
        "replace": {
          "description": "What to replace with. Max length is 100 characters.",
          "type": "string"
        }
      }
    },
    "NativeOutputFileFormatConfig": {
      "properties": {},
      "description": "Output configurations for serialized `BatchRecognizeResults` protos.",
      "id": "NativeOutputFileFormatConfig",
      "type": "object"
    },
    "BatchRecognizeFileMetadata": {
      "properties": {
        "uri": {
          "type": "string",
          "description": "Cloud Storage URI for the audio file."
        },
        "config": {
          "$ref": "RecognitionConfig",
          "description": "Features and audio metadata to use for the Automatic Speech Recognition. This field in combination with the config_mask field can be used to override parts of the default_recognition_config of the Recognizer resource as well as the config at the request level."
        },
        "configMask": {
          "type": "string",
          "format": "google-fieldmask",
          "description": "The list of fields in config that override the values in the default_recognition_config of the recognizer during this recognition request. If no mask is provided, all non-default valued fields in config override the values in the recognizer for this recognition request. If a mask is provided, only the fields listed in the mask override the config in the recognizer for this recognition request. If a wildcard (`*`) is provided, config completely overrides and replaces the config in the recognizer for this recognition request."
        }
      },
      "id": "BatchRecognizeFileMetadata",
      "type": "object",
      "description": "Metadata about a single file in a batch for BatchRecognize."
    },
    "Config": {
      "properties": {
        "name": {
          "readOnly": true,
          "type": "string",
          "description": "Output only. Identifier. The name of the config resource. There is exactly one config resource per project per location. The expected format is `projects/{project}/locations/{location}/config`."
        },
        "kmsKeyName": {
          "description": "Optional. An optional [KMS key name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) that if present, will be used to encrypt Speech-to-Text resources at-rest. Updating this key will not encrypt existing resources using this key; only new resources will be encrypted using this key. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.",
          "type": "string"
        },
        "updateTime": {
          "description": "Output only. The most recent time this resource was modified.",
          "type": "string",
          "format": "google-datetime",
          "readOnly": true
        }
      },
      "id": "Config",
      "type": "object",
      "description": "Message representing the config for the Speech-to-Text API. This includes an optional [KMS key](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with which incoming data will be encrypted."
    },
    "RecognitionResponseMetadata": {
      "properties": {
        "prompt": {
          "type": "string",
          "description": "Optional. Output only. Provides the prompt used for the recognition request.",
          "readOnly": true
        },
        "requestId": {
          "description": "Global request identifier auto-generated by the API.",
          "type": "string"
        },
        "totalBilledDuration": {
          "type": "string",
          "format": "google-duration",
          "description": "When available, billed audio seconds for the corresponding request."
        }
      },
      "id": "RecognitionResponseMetadata",
      "type": "object",
      "description": "Metadata about the recognition request and response."
    },
    "BatchRecognizeResults": {
      "description": "Output type for Cloud Storage of BatchRecognize transcripts. Though this proto isn't returned in this API anywhere, the Cloud Storage transcripts will be this proto serialized and should be parsed as such.",
      "id": "BatchRecognizeResults",
      "type": "object",
      "properties": {
        "results": {
          "description": "Sequential list of transcription results corresponding to sequential portions of audio.",
          "items": {
            "$ref": "SpeechRecognitionResult"
          },
          "type": "array"
        },
        "metadata": {
          "description": "Metadata about the recognition.",
          "$ref": "RecognitionResponseMetadata"
        }
      }
    },
    "OutputFormatConfig": {
      "properties": {
        "native": {
          "$ref": "NativeOutputFileFormatConfig",
          "description": "Configuration for the native output format. If this field is set or if no other output format field is set, then transcripts will be written to the sink in the native format."
        },
        "srt": {
          "$ref": "SrtOutputFileFormatConfig",
          "description": "Configuration for the SRT output format. If this field is set, then transcripts will be written to the sink in the SRT format."
        },
        "vtt": {
          "$ref": "VttOutputFileFormatConfig",
          "description": "Configuration for the VTT output format. If this field is set, then transcripts will be written to the sink in the VTT format."
        }
      },
      "description": "Configuration for the format of the results stored to `output`.",
      "id": "OutputFormatConfig",
      "type": "object"
    },
    "BatchRecognizeMetadata": {
      "properties": {
        "transcriptionMetadata": {
          "type": "object",
          "description": "Map from provided filename to the transcription metadata for that file.",
          "additionalProperties": {
            "$ref": "BatchRecognizeTranscriptionMetadata"
          }
        }
      },
      "id": "BatchRecognizeMetadata",
      "type": "object",
      "description": "Operation metadata for BatchRecognize."
    },
    "ModelMetadata": {
      "description": "The metadata about the models in a given region for a specific locale. Currently this is just the features of the model",
      "id": "ModelMetadata",
      "type": "object",
      "properties": {
        "modelFeatures": {
          "type": "object",
          "description": "Map of the model name -\u003e features of that model",
          "additionalProperties": {
            "$ref": "ModelFeatures"
          }
        }
      }
    },
    "VttOutputFileFormatConfig": {
      "description": "Output configurations for [WebVTT](https://www.w3.org/TR/webvtt1/) formatted subtitle file.",
      "id": "VttOutputFileFormatConfig",
      "type": "object",
      "properties": {}
    },
    "ListRecognizersResponse": {
      "properties": {
        "recognizers": {
          "description": "The list of requested Recognizers.",
          "items": {
            "$ref": "Recognizer"
          },
          "type": "array"
        },
        "nextPageToken": {
          "type": "string",
          "description": "A token, which can be sent as page_token to retrieve the next page. If this field is omitted, there are no subsequent pages. This token expires after 72 hours."
        }
      },
      "description": "Response message for the ListRecognizers method.",
      "id": "ListRecognizersResponse",
      "type": "object"
    },
    "InlineOutputConfig": {
      "id": "InlineOutputConfig",
      "type": "object",
      "description": "Output configurations for inline response.",
      "properties": {}
    },
    "TranscriptNormalization": {
      "description": "Transcription normalization configuration. Use transcription normalization to automatically replace parts of the transcript with phrases of your choosing. For StreamingRecognize, this normalization only applies to stable partial transcripts (stability \u003e 0.8) and final transcripts.",
      "id": "TranscriptNormalization",
      "type": "object",
      "properties": {
        "entries": {
          "type": "array",
          "description": "A list of replacement entries. We will perform replacement with one entry at a time. For example, the second entry in [\"cat\" =\u003e \"dog\", \"mountain cat\" =\u003e \"mountain dog\"] will never be applied because we will always process the first entry before it. At most 100 entries.",
          "items": {
            "$ref": "Entry"
          }
        }
      }
    },
    "AccessMetadata": {
      "id": "AccessMetadata",
      "type": "object",
      "description": "The access metadata for a particular region. This can be applied if the org policy for the given project disallows a particular region.",
      "properties": {
        "constraintType": {
          "description": "Describes the different types of constraints that are applied.",
          "type": "string",
          "enum": [
            "CONSTRAINT_TYPE_UNSPECIFIED",
            "RESOURCE_LOCATIONS_ORG_POLICY_CREATE_CONSTRAINT"
          ],
          "enumDescriptions": [
            "Unspecified constraint applied.",
            "The project's org policy disallows the given region."
          ]
        }
      }
    },
    "SpeakerDiarizationConfig": {
      "id": "SpeakerDiarizationConfig",
      "type": "object",
      "description": "Configuration to enable speaker diarization.",
      "properties": {
        "minSpeakerCount": {
          "type": "integer",
          "format": "int32",
          "description": "Optional. The system automatically determines the number of speakers. This value is not currently used."
        },
        "maxSpeakerCount": {
          "type": "integer",
          "format": "int32",
          "description": "Optional. The system automatically determines the number of speakers. This value is not currently used."
        }
      }
    },
    "OperationMetadata": {
      "properties": {
        "batchRecognizeRequest": {
          "$ref": "BatchRecognizeRequest",
          "description": "The BatchRecognizeRequest that spawned the Operation."
        },
        "updateCustomClassRequest": {
          "description": "The UpdateCustomClassRequest that spawned the Operation.",
          "$ref": "UpdateCustomClassRequest"
        },
        "progressPercent": {
          "description": "The percent progress of the Operation. Values can range from 0-100. If the value is 100, then the operation is finished.",
          "type": "integer",
          "format": "int32"
        },
        "method": {
          "description": "The method that triggered the operation.",
          "type": "string"
        },
        "updateConfigRequest": {
          "description": "The UpdateConfigRequest that spawned the Operation.",
          "deprecated": true,
          "$ref": "UpdateConfigRequest"
        },
        "deletePhraseSetRequest": {
          "$ref": "DeletePhraseSetRequest",
          "description": "The DeletePhraseSetRequest that spawned the Operation."
        },
        "undeleteCustomClassRequest": {
          "description": "The UndeleteCustomClassRequest that spawned the Operation.",
          "$ref": "UndeleteCustomClassRequest"
        },
        "createCustomClassRequest": {
          "description": "The CreateCustomClassRequest that spawned the Operation.",
          "$ref": "CreateCustomClassRequest"
        },
        "deleteRecognizerRequest": {
          "description": "The DeleteRecognizerRequest that spawned the Operation.",
          "$ref": "DeleteRecognizerRequest"
        },
        "deleteCustomClassRequest": {
          "description": "The DeleteCustomClassRequest that spawned the Operation.",
          "$ref": "DeleteCustomClassRequest"
        },
        "undeletePhraseSetRequest": {
          "description": "The UndeletePhraseSetRequest that spawned the Operation.",
          "$ref": "UndeletePhraseSetRequest"
        },
        "batchRecognizeMetadata": {
          "description": "Metadata specific to the BatchRecognize method.",
          "$ref": "BatchRecognizeMetadata"
        },
        "kmsKeyVersionName": {
          "description": "The [KMS key version name](https://cloud.google.com/kms/docs/resource-hierarchy#key_versions) with which the content of the Operation is encrypted. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}`.",
          "type": "string"
        },
        "createPhraseSetRequest": {
          "$ref": "CreatePhraseSetRequest",
          "description": "The CreatePhraseSetRequest that spawned the Operation."
        },
        "updateTime": {
          "description": "The time the operation was last updated.",
          "type": "string",
          "format": "google-datetime"
        },
        "createTime": {
          "description": "The time the operation was created.",
          "type": "string",
          "format": "google-datetime"
        },
        "updateRecognizerRequest": {
          "$ref": "UpdateRecognizerRequest",
          "description": "The UpdateRecognizerRequest that spawned the Operation."
        },
        "kmsKeyName": {
          "description": "The [KMS key name](https://cloud.google.com/kms/docs/resource-hierarchy#keys) with which the content of the Operation is encrypted. The expected format is `projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}`.",
          "type": "string"
        },
        "resource": {
          "description": "The resource path for the target of the operation.",
          "type": "string"
        },
        "updatePhraseSetRequest": {
          "$ref": "UpdatePhraseSetRequest",
          "description": "The UpdatePhraseSetRequest that spawned the Operation."
        },
        "undeleteRecognizerRequest": {
          "$ref": "UndeleteRecognizerRequest",
          "description": "The UndeleteRecognizerRequest that spawned the Operation."
        },
        "createRecognizerRequest": {
          "$ref": "CreateRecognizerRequest",
          "description": "The CreateRecognizerRequest that spawned the Operation."
        }
      },
      "id": "OperationMetadata",
      "type": "object",
      "description": "Represents the metadata of a long-running operation."
    },
    "BatchRecognizeResponse": {
      "properties": {
        "results": {
          "description": "Map from filename to the final result for that file.",
          "additionalProperties": {
            "$ref": "BatchRecognizeFileResult"
          },
          "type": "object"
        },
        "totalBilledDuration": {
          "description": "When available, billed audio seconds for the corresponding request.",
          "type": "string",
          "format": "google-duration"
        }
      },
      "description": "Response message for BatchRecognize that is packaged into a long-running Operation.",
      "id": "BatchRecognizeResponse",
      "type": "object"
    }
  },
  "batchPath": "batch",
  "auth": {
    "oauth2": {
      "scopes": {
        "https://www.googleapis.com/auth/cloud-platform": {
          "description": "See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account."
        }
      }
    }
  },
  "ownerName": "Google",
  "revision": "20260402",
  "icons": {
    "x16": "http://www.google.com/images/icons/product/search-16.gif",
    "x32": "http://www.google.com/images/icons/product/search-32.gif"
  },
  "protocol": "rest",
  "version": "v2",
  "id": "speech:v2",
  "name": "speech",
  "kind": "discovery#restDescription",
  "rootUrl": "https://speech.googleapis.com/",
  "mtlsRootUrl": "https://speech.mtls.googleapis.com/",
  "basePath": "",
  "version_module": true,
  "documentationLink": "https://cloud.google.com/speech-to-text/docs/quickstart-protocol",
  "discoveryVersion": "v1",
  "parameters": {
    "prettyPrint": {
      "location": "query",
      "type": "boolean",
      "default": "true",
      "description": "Returns response with indentations and line breaks."
    },
    "uploadType": {
      "location": "query",
      "type": "string",
      "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\")."
    },
    "$.xgafv": {
      "location": "query",
      "enumDescriptions": [
        "v1 error format",
        "v2 error format"
      ],
      "type": "string",
      "enum": [
        "1",
        "2"
      ],
      "description": "V1 error format."
    },
    "oauth_token": {
      "location": "query",
      "type": "string",
      "description": "OAuth 2.0 token for the current user."
    },
    "callback": {
      "location": "query",
      "type": "string",
      "description": "JSONP"
    },
    "fields": {
      "location": "query",
      "type": "string",
      "description": "Selector specifying which fields to include in a partial response."
    },
    "upload_protocol": {
      "location": "query",
      "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
      "type": "string"
    },
    "alt": {
      "default": "json",
      "description": "Data format for response.",
      "type": "string",
      "enum": [
        "json",
        "media",
        "proto"
      ],
      "enumDescriptions": [
        "Responses with Content-Type of application/json",
        "Media download with context-dependent Content-Type",
        "Responses with Content-Type of application/x-protobuf"
      ],
      "location": "query"
    },
    "quotaUser": {
      "location": "query",
      "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
      "type": "string"
    },
    "access_token": {
      "description": "OAuth access token.",
      "type": "string",
      "location": "query"
    },
    "key": {
      "location": "query",
      "type": "string",
      "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token."
    }
  }
}
