Amazon Bedrock: Anthropic Claude: Chat

Amazon Bedrock: Anthropic Claude: Chat

Amazon Bedrock: Anthropic Claude: チャット

This item sends a message to an Anthropic Claude model on Amazon Bedrock and stores the response in the specified data item.

Auto Step icon
Basic Configs
Step Name
Note
Configs for this Auto Step
conf_AccessKey
C1: Access Key *
conf_SecretKey
C2: Secret Access Key *
conf_Region
C3: Region Code *
conf_Model
C4: Model *
conf_UseCrossRegion
C5: Use cross-region inference
conf_MaxTokens
C6: Maximum number of tokens to consume (2048 if blank)
conf_EnableThinking
C7E: Enable reasoning in Extended Thinking Mode
conf_BudgetTokens
C7E-b: Maximum number of tokens for reasoning (1024 if blank)
conf_EnableAdaptiveThinking
C7A: Enable reasoning in Adaptive Thinking Mode
conf_Effort
C8: Effort
conf_Temperature
C9: Temperature (0.0 – 1.0) (1.0 if blank)
conf_StopSequences
C10: Stop Sequences (write one per line)
conf_SystemPrompt
C11: System Prompt#{EL}
conf_Message1
U1: User Message *#{EL}
conf_Images1
I1: Images / PDFs to attach to user message
conf_Answer1
A1: Data item to save response *

Notes

  • To learn about how to create your Access Key and Secret Access Key, see the Amazon Web Services Documentation
  • To learn about cross-region inference, see the Amazon Web Services Documentation
    • You do not have to add a region prefix in [C4: Model]
    • When the model selected in [C4: Model] supports global cross-region inference and [C5: Use cross-region inference] is enabled, this item uses global cross-region inference
  • [C7E: Enable reasoning in Extended Thinking Mode], [C7A: Enable reasoning in Adaptive Thinking Mode], [C8: Effort], [C11: System Prompt] and [I1: Images / PDFs to attach to user message] are only supported on certain models
    • For more details, please refer to Anthropic Claude Documentation
    • Enabling both [C7E: Enable reasoning in Extended Thinking Mode] and [C7A: Enable reasoning in Adaptive Thinking Mode] causes this item to fail

Capture

See Also

Script (click to open)
  • An XML file that contains the code below is available to download
    • aws-bedrock-anthropic-claude-chat.xml (C) Questetra, Inc. (MIT License)
    • If you are using Professional, you can modify the contents of this file and use it as your own add-on auto step

// AWS service name used when signing requests with SigV4
const SERVICE = 'bedrock';
// Anthropic version identifier required by Bedrock for Claude models
const ANTHROPIC_VERSION = 'bedrock-2023-05-31';

// Default values applied when the corresponding config is left blank
const DEFAULT = Object.freeze({
    'conf_MaxTokens': 2048,
    'conf_BudgetTokens': 1024,
    'conf_Temperature': 1,
});

/**
 * Main process of this auto step.
 * Reads the step configs, sends the user message to an Anthropic Claude
 * model via the Bedrock Converse API, and saves the answer to the
 * selected data item.
 */
const main = () => {
    ////// == Config & Data Retrieving ==
    const awsKey = configs.getObject('conf_AccessKey').getToken();
    const awsSecret = configs.getObject('conf_SecretKey').getToken();
    const region = retrieveRegion();
    let model = retrieveModel();
    if (configs.getObject('conf_UseCrossRegion')) {
        // prepend the cross-region inference prefix to the model ID
        model = decideCrossRegionModel(model, region);
    }

    const maxTokens = retrieveMaxTokens('conf_MaxTokens');
    const enableThinking = configs.getObject('conf_EnableThinking');
    let budgetTokens;
    if (enableThinking) {
        budgetTokens = retrieveMaxTokens('conf_BudgetTokens');
        if (budgetTokens >= maxTokens) {
            throw new Error('Maximum number of tokens for reasoning must be less than maximum number of tokens to consume.');
        }
    }
    const enableAdaptiveThinking = configs.getObject('conf_EnableAdaptiveThinking');
    if (enableThinking && enableAdaptiveThinking) {
        throw new Error('Extended Thinking Mode and Adaptive Thinking Mode cannot be enabled at the same time.');
    }
    const effort = configs.get('conf_Effort');
    const temperature = retrieveTemperature();
    const thinkingEnabled = enableThinking || enableAdaptiveThinking;
    if (thinkingEnabled && temperature !== DEFAULT['conf_Temperature']) {
        throw new Error(`Temperature must be ${DEFAULT['conf_Temperature']} when Extended or Adaptive Thinking Mode is enabled.`);
    }
    const stopSequences = retrieveStopSequences();
    const systemPrompt = configs.get('conf_SystemPrompt');
    const userMessage = retrieveUserMessage(); // also retrieves attached images / PDFs

    ////// == Calculating ==
    const answer = converse(
        awsKey, awsSecret, region, model,
        maxTokens, enableThinking, budgetTokens, enableAdaptiveThinking,
        effort, temperature, stopSequences, systemPrompt, userMessage
    );

    ////// == Data Updating ==
    saveData('conf_Answer1', answer);
};

/**
 * Reads the region code from the config.
 * @returns {String} region code
 * @throws when the value does not look like a valid region code
 */
const retrieveRegion = () => {
    const region = configs.get('conf_Region');
    // the middle segment allows up to 16 letters, leaving room for future regions
    if (!/^[a-z]{2}-[a-z]{4,16}-[1-9]$/.test(region)) {
        throw new Error('Region Code is invalid.');
    }
    return region;
};

/**
 * Reads the model ID from the config.
 * @returns {String} model ID
 * @throws when the value contains an invalid character or has a wrong prefix
 */
const retrieveModel = () => {
    const MODEL_PREFIX = 'anthropic.claude';
    const model = configs.get('conf_Model');
    if (!/^[a-z0-9.:-]+$/.test(model)) {
        throw new Error('Model is invalid. It contains an invalid character.');
    }
    if (!model.startsWith(MODEL_PREFIX)) {
        throw new Error(`Model is invalid. It must start with "${MODEL_PREFIX}".`);
    }
    return model;
};

/* Prefixes identifying models that support cross-region inference
   but NOT global cross-region inference */
const GLOBAL_CROSS_REGION_UNSUPPORTED_MODEL_PREFIXES = [
    'anthropic.claude-3-',
    'anthropic.claude-3-5-',
    'anthropic.claude-3-7-',
    'anthropic.claude-opus-4-20250514-v1:0',
    'anthropic.claude-opus-4-1-20250805-v1:0'
];

/**
 * Decides the cross-region inference model ID.
 * Models that support global cross-region inference get the "global." prefix;
 * others get a geographic prefix derived from the region in C3.
 * @param {String} model  model ID
 * @param {String} region  region code
 * @returns {String} model ID with a cross-region inference prefix
 * @throws when the model cannot use global inference and the region
 *         is not covered by a geographic inference profile
 */
const decideCrossRegionModel = (model, region) => {
    const globalUnsupported = GLOBAL_CROSS_REGION_UNSUPPORTED_MODEL_PREFIXES
        .some((prefix) => model.startsWith(prefix));
    if (!globalUnsupported) {
        return `global.${model}`;
    }
    const GEO_PREFIXES = [
        ['us-', 'us'],
        ['eu-', 'eu'],
        ['ap-', 'apac']
    ];
    const matched = GEO_PREFIXES.find(([regionPrefix]) => region.startsWith(regionPrefix));
    if (matched === undefined) {
        throw new Error(`Cross-region inference is not supported in ${region}.`);
    }
    return `${matched[1]}.${model}`;
};

// Smallest reasoning budget accepted by Extended Thinking Mode
const MIN_BUDGET_TOKENS = 1024;

// Validation error messages, keyed by config name
const ERROR_MSG = Object.freeze({
    'conf_MaxTokens': 'Maximum number of tokens to consume must be a positive integer.',
    'conf_BudgetTokens':
        `Maximum number of tokens for reasoning must be an integer greater than or equal to ${MIN_BUDGET_TOKENS}.`
});

/**
 * Reads a maximum-token config value.
 * Returns the default value when the config is blank.
 * @param confName  config name ('conf_MaxTokens' or 'conf_BudgetTokens')
 * @returns {Number}
 * @throws when the value is not a valid positive integer, or the
 *         reasoning budget is below MIN_BUDGET_TOKENS
 */
const retrieveMaxTokens = (confName) => {
    const value = configs.get(confName);
    if (value === '') {
        return DEFAULT[confName];
    }
    if (!/^[1-9][0-9]*$/.test(value)) {
        throw new Error(ERROR_MSG[confName]);
    }
    const tokens = parseInt(value, 10);
    if (confName === 'conf_BudgetTokens' && tokens < MIN_BUDGET_TOKENS) {
        throw new Error(ERROR_MSG[confName]);
    }
    return tokens;
};

/**
 * Reads the temperature from the config.
 * Returns the default value (1) when the config is blank.
 * @returns {Number}
 * @throws when the value is not a number from 0 to 1
 */
const retrieveTemperature = () => {
    const value = configs.get('conf_Temperature');
    if (value === '') {
        return DEFAULT['conf_Temperature'];
    }
    // accepts 0, 0.x, 1 and 1.0... only
    if (!/^(0(\.\d+)?|1(\.0+)?)$/.test(value)) {
        throw new Error('Temperature must be a number from 0 to 1.');
    }
    return parseFloat(value);
};

// Largest number of stop sequences accepted
const MAX_STOP_SEQUENCES_NUM = 8191;

/**
 * Reads the stop sequences (one per line) from the config.
 * Blank lines are ignored.
 * @returns {Array<String>}
 * @throws when more than MAX_STOP_SEQUENCES_NUM sequences are set
 */
const retrieveStopSequences = () => {
    const confValue = configs.get('conf_StopSequences');
    if (confValue === '') {
        return [];
    }
    const sequences = [];
    for (const line of confValue.split('\n')) {
        if (line !== '') {
            sequences.push(line);
        }
    }
    if (sequences.length > MAX_STOP_SEQUENCES_NUM) {
        throw new Error(`Too many stop sequences. The maximum number is ${MAX_STOP_SEQUENCES_NUM}.`);
    }
    return sequences;
};


const MAX_IMAGE_NUM = 20; // Bedrock Converse API limit: up to 20 attached images per user message
const MAX_IMAGE_SIZE = 3932160; // Bedrock Converse API limit: up to 3.75 MB per image file
const ALLOWED_MEDIA_TYPES = ['image/jpeg', 'image/png', 'image/gif', 'image/webp'];
const MAX_DOC_NUM = 5; // Bedrock Converse API limit: up to 5 PDFs per user message
const MAX_DOC_SIZE = 4718592; // Bedrock Converse API limit: up to 4.5 MB per document file
const ALLOWED_DOCUMENT_TYPES = ['application/pdf'];

/**
 * Reads the user message, including attached images and PDFs, from the configs.
 * @returns {Object} user message object for the Converse API
 * @throws when the user message is empty, or an attached file is invalid
 */
const retrieveUserMessage = () => {
    const text = configs.get('conf_Message1');
    if (text === '') {
        throw new Error('User Message is empty.');
    }

    // split attached files by Content-Type; throws if a type is not allowed
    const {imageFiles, docFiles} = retrieveFiles();
    validateFiles(imageFiles, 'image', MAX_IMAGE_NUM, MAX_IMAGE_SIZE);
    validateFiles(docFiles, 'PDF', MAX_DOC_NUM, MAX_DOC_SIZE);

    const imageContents = imageFiles.map((file) => ({
        image: fileToObj(file)
    }));
    const docContents = docFiles.map((file, index) => ({
        document: {
            name: `DOC${index}`, // the API requires a name for each document
            ...fileToObj(file)
        }
    }));

    return {
        role: 'user',
        content: [
            {text},
            ...imageContents,
            ...docContents
        ]
    };
};

/**
 * Reads the attached files from the config and sorts them into images and PDFs.
 * @returns {Object} {imageFiles: Array<Qfile>, docFiles: Array<Qfile>}
 * @throws when a file's Content-Type is neither an allowed image type nor PDF
 */
const retrieveFiles = () => {
    const imageFiles = [];
    const docFiles = [];

    const filesDef = configs.getObject('conf_Images1');
    // no data item selected, or the data item holds no files
    const files = filesDef === null ? null : engine.findData(filesDef);
    if (files === null) {
        return {imageFiles, docFiles};
    }

    files.forEach((file) => {
        // drop parameters such as "; charset=..." from the Content-Type
        const contentType = file.getContentType().split(';')[0];
        if (ALLOWED_MEDIA_TYPES.includes(contentType)) {
            imageFiles.push(file);
        } else if (ALLOWED_DOCUMENT_TYPES.includes(contentType)) {
            docFiles.push(file);
        } else {
            throw new Error(`Content-Type of the attached file "${file.getName()}" is not allowed.`);
        }
    });

    return {imageFiles, docFiles};
};

/**
 * Validates a sorted list of attached files (images or PDFs).
 * @param {Array<Qfile>} files  attached file objects
 * @param {String} errorLabel  label used in error messages ('image' or 'PDF')
 * @param {Number} maxNum  maximum number of files allowed
 * @param {Number} maxSize  maximum size in bytes per file
 * @throws when there are too many files or a file is too large
 */
const validateFiles = (files, errorLabel, maxNum, maxSize) => {
    if (files.length > maxNum) {
        throw new Error(`Too many ${errorLabel}s attached. The maximum number is ${maxNum}.`);
    }
    const oversized = files.find((file) => file.getLength() > maxSize);
    if (oversized !== undefined) {
        throw new Error(`Attached ${errorLabel} file "${oversized.getName()}" is too large. The maximum size is ${maxSize} bytes.`);
    }
};

/**
 * Builds the part of an inline content object shared by images and PDFs
 * (format and base64-encoded source bytes) from an attached file.
 * @param {Qfile} file  attached file object
 * @returns {Object} {format, source: {bytes}}
 */
const fileToObj = (file) => {
    // subtype of the Content-Type, e.g. "png" from "image/png"
    const format = file.getContentType().split('/')[1];
    const bytes = base64.encodeToString(fileRepository.readFile(file));
    return {
        format,
        source: {bytes}
    };
};


/**
 * Sends the conversation request to the Bedrock Converse API and returns
 * the generated answer. Logs the reasoning text (if any), the stop reason,
 * and the token usage.
 * @param awsKey  AWS access key ID, used for SigV4 signing
 * @param awsSecret  AWS secret access key, used for SigV4 signing
 * @param region  AWS region code (e.g. "us-east-1")
 * @param model  model ID, possibly with a cross-region inference prefix
 * @param maxTokens  maximum number of tokens the model may consume
 * @param enableThinking  truthy when Extended Thinking Mode is enabled
 * @param budgetTokens  reasoning token budget (set only when enableThinking)
 * @param enableAdaptiveThinking  truthy when Adaptive Thinking Mode is enabled
 * @param effort  effort level; null or '' means not specified
 * @param temperature  sampling temperature
 * @param stopSequences  stop sequences (possibly an empty array)
 * @param systemPrompt  system prompt; '' means not specified
 * @param userMessage  user message object, including attached images / PDFs
 * @returns {String} answer  concatenation of all text contents in the response
 * @throws when the HTTP status is not 200, or no non-empty text is returned
 */
const converse = (
    awsKey,
    awsSecret,
    region,
    model,
    maxTokens,
    enableThinking,
    budgetTokens,
    enableAdaptiveThinking,
    effort,
    temperature,
    stopSequences,
    systemPrompt,
    userMessage
) => {
    const URL = `https://bedrock-runtime.${region}.amazonaws.com/model/${model}/converse`;
    const payload = {
        anthropic_version: ANTHROPIC_VERSION,
        inferenceConfig: {
            maxTokens,
            stopSequences,
            temperature
        },

        messages: [userMessage]
    };

    // Extended Thinking Mode: pass an explicit reasoning token budget
    if (enableThinking) {
        Object.assign(payload, {
            additionalModelRequestFields: {
                thinking: {
                    type: 'enabled',
                    budget_tokens: budgetTokens
                }
            }
        });
    }

    // Adaptive Thinking Mode: the model decides how much to reason
    // (main() guarantees both thinking modes are never enabled together)
    if (enableAdaptiveThinking) {
        Object.assign(payload, {
            additionalModelRequestFields: {
                thinking: {
                    type: 'adaptive'
                }
            }
        });
    }

    // effort is merged into additionalModelRequestFields so it can coexist
    // with either thinking configuration
    if (effort !== null && effort !== '') {
        if (payload.additionalModelRequestFields === undefined) {
            payload.additionalModelRequestFields = {};
        }
        Object.assign(payload.additionalModelRequestFields, {
            anthropic_beta: ['effort-2025-11-24'], // required for 4.5-series models
            output_config: {effort}
        });
    }

    if (systemPrompt !== '') {
        Object.assign(payload, {
            system: [
                {text :`${systemPrompt}`}
            ]
        });
    }


    const response = httpClient.begin()
        .awsSignV4(awsKey, awsSecret, region, SERVICE)
        .body(JSON.stringify(payload), 'application/json')
        .post(URL);

    const status = response.getStatusCode();
    const respTxt = response.getResponseAsString();
    if (status !== 200) {
        engine.log(respTxt);
        throw new Error(`Failed to converse. status: ${status}`);
    }
    const {output, stopReason, usage} = JSON.parse(respTxt);
    // log the model's reasoning text when a thinking mode produced one
    const reasoningContentItem = output.message.content.find(content => content.reasoningContent !== undefined);
    const reasoningText = reasoningContentItem?.reasoningContent?.reasoningText?.text;
    if (reasoningText !== undefined) {
        engine.log(`Thinking: ${reasoningText}`);
    }
    engine.log(`Stop Reason: ${stopReason}`);
    engine.log(`Input Tokens: ${usage.inputTokens}`);
    engine.log(`Output Tokens: ${usage.outputTokens}`);

    // concatenate every content item that has a text property into the answer
    const answer1 = output.message.content
        .filter(content => content.text !== undefined)
        .map(content => content.text)
        .join('');
    if (answer1 === null || answer1.trim() === '') {
        throw new Error(`No response content generated. Stop Reason: ${stopReason}`);
    }
    return answer1;
};

/**
 * Saves the answer to the data item selected in the config.
 * Does nothing when no data item is selected.
 * @param configName  name of the config that selects the data item
 * @param data  value to save
 */
const saveData = (configName, data) => {
    const dataDef = configs.getObject(configName);
    if (dataDef !== null) {
        engine.setData(dataDef, data);
    }
};

        
Scroll to Top

Discover more from Questetra Support

Subscribe now to keep reading and get access to the full archive.

Continue reading