From 411fceb8b593eb2f13cd22bbb1f1be169b012a9d Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Thu, 10 Oct 2024 20:41:12 -0400
Subject: [PATCH 1/3] [Vertex AI] Add `HarmBlockMethod` enum and `method` property

---
 FirebaseVertexAI/Sources/Safety.swift        | 21 ++++++++++++++++++-
 .../Tests/Integration/IntegrationTests.swift |  8 +++----
 2 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/FirebaseVertexAI/Sources/Safety.swift b/FirebaseVertexAI/Sources/Safety.swift
index 2ff4fe85f1c..2fc35126d36 100644
--- a/FirebaseVertexAI/Sources/Safety.swift
+++ b/FirebaseVertexAI/Sources/Safety.swift
@@ -173,9 +173,24 @@ public struct SafetySetting {
     let rawValue: String
   }
 
+  /// The method of computing whether the ``SafetySetting/HarmBlockThreshold`` has been exceeded.
+  public struct HarmBlockMethod: EncodableProtoEnum, Sendable {
+    enum Kind: String {
+      case severity = "SEVERITY"
+      case probability = "PROBABILITY"
+    }
+
+    public static let severity = HarmBlockMethod(kind: .severity)
+
+    public static let probability = HarmBlockMethod(kind: .probability)
+
+    let rawValue: String
+  }
+
   enum CodingKeys: String, CodingKey {
     case harmCategory = "category"
     case threshold
+    case method
   }
 
   /// The category this safety setting should be applied to.
@@ -184,10 +199,14 @@ public struct SafetySetting {
   /// The threshold describing what content should be blocked.
   public let threshold: HarmBlockThreshold
 
+  public let method: HarmBlockMethod?
+
   /// Initializes a new safety setting with the given category and threshold.
-  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold) {
+  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold,
+              method: HarmBlockMethod? = nil) {
     self.harmCategory = harmCategory
     self.threshold = threshold
+    self.method = method
   }
 }
 
diff --git a/FirebaseVertexAI/Tests/Integration/IntegrationTests.swift b/FirebaseVertexAI/Tests/Integration/IntegrationTests.swift
index fee87108da7..b884a41e9a5 100644
--- a/FirebaseVertexAI/Tests/Integration/IntegrationTests.swift
+++ b/FirebaseVertexAI/Tests/Integration/IntegrationTests.swift
@@ -30,8 +30,8 @@ final class IntegrationTests: XCTestCase {
     parts: "You are a friendly and helpful assistant."
   )
   let safetySettings = [
-    SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
-    SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove),
+    SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
+    SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove, method: .severity),
     SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockLowAndAbove),
     SafetySetting(harmCategory: .dangerousContent, threshold: .blockLowAndAbove),
     SafetySetting(harmCategory: .civicIntegrity, threshold: .blockLowAndAbove),
@@ -89,11 +89,11 @@ final class IntegrationTests: XCTestCase {
       modelName: "gemini-1.5-pro",
       generationConfig: generationConfig,
       safetySettings: [
-        SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
+        SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .severity),
         SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove),
         SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockOnlyHigh),
         SafetySetting(harmCategory: .dangerousContent, threshold: .blockNone),
-        SafetySetting(harmCategory: .civicIntegrity, threshold: .off),
+        SafetySetting(harmCategory: .civicIntegrity, threshold: .off, method: .probability),
       ],
       toolConfig: .init(functionCallingConfig: .auto()),
       systemInstruction: systemInstruction

From a131daa04e3be9ff90ace17a23277fe0b9e1cbb4 Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Mon, 14 Oct 2024 16:14:35 -0400
Subject: [PATCH 2/3] Add documentation

---
 FirebaseVertexAI/Sources/Safety.swift | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/FirebaseVertexAI/Sources/Safety.swift b/FirebaseVertexAI/Sources/Safety.swift
index 2fc35126d36..655046db98a 100644
--- a/FirebaseVertexAI/Sources/Safety.swift
+++ b/FirebaseVertexAI/Sources/Safety.swift
@@ -180,8 +180,10 @@ public struct SafetySetting {
       case probability = "PROBABILITY"
     }
 
+    /// Use both probability and severity scores.
     public static let severity = HarmBlockMethod(kind: .severity)
 
+    /// Use only the probability score.
     public static let probability = HarmBlockMethod(kind: .probability)
 
     let rawValue: String
@@ -199,9 +201,20 @@ public struct SafetySetting {
   /// The threshold describing what content should be blocked.
   public let threshold: HarmBlockThreshold
 
+  /// The method of computing whether the ``threshold`` has been exceeded.
   public let method: HarmBlockMethod?
 
   /// Initializes a new safety setting with the given category and threshold.
+  ///
+  /// - Parameters:
+  ///   - harmCategory: The category this safety setting should be applied to.
+  ///   - threshold: The threshold describing what content should be blocked.
+  ///   - method: The method of computing whether the threshold has been exceeded; if not specified,
+  ///     the default method is ``HarmBlockMethod/severity`` for most models. See [harm block
+  ///     methods](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#how_to_configure_safety_filters)
+  ///     in the Google Cloud documentation for more details.
+  ///   > Note: For models older than `gemini-1.5-flash` and `gemini-1.5-pro`, the default method
+  ///   > is ``HarmBlockMethod/probability``.
   public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold,
               method: HarmBlockMethod? = nil) {
     self.harmCategory = harmCategory

From b62631e77fd88b4734ec7aaa30ac83f82a5cfaeb Mon Sep 17 00:00:00 2001
From: Andrew Heard
Date: Mon, 14 Oct 2024 16:21:08 -0400
Subject: [PATCH 3/3] Add CHANGELOG entry

---
 FirebaseVertexAI/CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/FirebaseVertexAI/CHANGELOG.md b/FirebaseVertexAI/CHANGELOG.md
index 346ef9a70bf..c3b6b5462ef 100644
--- a/FirebaseVertexAI/CHANGELOG.md
+++ b/FirebaseVertexAI/CHANGELOG.md
@@ -63,6 +63,9 @@
   (#13875)
 - [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
   filter. (#13863)
+- [added] Added an optional `HarmBlockMethod` parameter `method` in
+  `SafetySetting` that configures whether responses are blocked based on the
+  `probability` and/or `severity` of content being in a `HarmCategory`. (#13876)
 - [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
   `.spii` and `.malformedFunctionCall` that may be reported. (#13860)
 - [added] Added new `BlockReason` values `.blocklist` and `.prohibitedContent`
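
For reviewers, a brief usage sketch of the new `method` parameter (illustrative only, not part of the patch). The category, threshold, and method values come from the diff above; the `VertexAI.vertexAI().generativeModel(modelName:safetySettings:)` entry point and the `gemini-1.5-flash` model name are assumed from the existing SDK surface and may differ in your setup.

```swift
import FirebaseVertexAI

// Per-category safety settings. `method` is optional: .probability blocks based on the
// probability score alone, .severity uses both probability and severity scores.
// Omitting it (nil) keeps the backend's default method for the chosen model.
let safetySettings = [
  SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
  SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove, method: .severity),
  SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh), // method defaults to nil
]

// Assumed entry point; pass the settings when creating the model.
let model = VertexAI.vertexAI().generativeModel(
  modelName: "gemini-1.5-flash",
  safetySettings: safetySettings
)
```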