[[["Easy to understand","easyToUnderstand","thumb-up"],["Solved my problem","solvedMyProblem","thumb-up"],["Other","otherUp","thumb-up"]],[["Hard to understand","hardToUnderstand","thumb-down"],["Incorrect information or sample code","incorrectInformationOrSampleCode","thumb-down"],["Missing the information/samples I need","missingTheInformationSamplesINeed","thumb-down"],["Other","otherDown","thumb-down"]],["Last updated 2025-03-21 UTC."],[[["This webpage provides reference documentation for the `SafetySetting.Types.HarmBlockMethod` enum within the Google Cloud AI Platform v1 API, with the latest version being 3.22.0."],["The enum `SafetySetting.Types.HarmBlockMethod` offers options for managing harm blocking, including `Probability`, `Severity`, and `Unspecified`."],["The content includes a comprehensive list of documentation for many previous versions of the AI Platform V1, ranging from version 1.0.0 all the way to 3.22.0."],["The `Probability` method uses a probability score to determine harm, while the `Severity` method factors in both probability and severity scores, and `Unspecified` is for undefined methods."]]],[]]