node.jsgoogle-cloud-vertex-aigoogle-gemini

How to specify `systemInstruction` when sending a request to the Gemini API


How do I specify `systemInstruction` when sending a request to the Gemini API with Vertex AI? The Vertex AI playground shows a way to do it with Python, but not with Node.js.

The Python method shown by the playground:

def multiturn_generate_content():
  # Initialise the Vertex AI SDK for the target project and region.
  vertexai.init(project="myProjectId", location="us-central1")
  # Note: system_instruction is supplied at model-construction time,
  # not as a chat message.
  model = GenerativeModel(
    "gemini-1.0-pro-002",
    system_instruction=["""you are an ai assistant"""]
  )
  chat = model.start_chat()
  
  #rest of the code

multiturn_generate_content()

What I tried with Node.js:

Note: it errors out with: ClientError: [VertexAI.ClientError]: No content is provided for sending chat message.


const { VertexAI } = require('@google-cloud/vertexai')

// Shared model handle, populated by initializeGenerativeModel() below.
let generativeModel;

// Vertex AI connection settings — replace with your own project ID/region.
const projectId = 'myProjectId';
const location = 'us-central1';
const model = 'gemini-1.0-pro-002';

/**
 * Builds the module-level `generativeModel` handle.
 *
 * Fixes vs. the original:
 *  - `model` is not a `VertexAI` constructor option; the model is selected
 *    in `getGenerativeModel`, so it is dropped from the constructor call.
 *  - `systemInstruction` is set here, on the model, which is one of the two
 *    supported ways to provide it (the other being `startChat`). It must
 *    NOT be sent later as a chat message with role 'system'.
 *
 * @throws rethrows any SDK initialization error after logging it.
 */
function initializeGenerativeModel() {
    try {
        const vertex_ai = new VertexAI({ project: projectId, location: location });

        generativeModel = vertex_ai.preview.getGenerativeModel({
            model: model,
            systemInstruction: {
                role: 'system',
                parts: [{ text: 'You are an ai assistant' }],
            },
            generationConfig: {
                maxOutputTokens: 8192,
                temperature: 1,
                topP: 0.95,
            },
            safetySettings: [
                {
                    category: 'HARM_CATEGORY_HATE_SPEECH',
                    threshold: 'BLOCK_MEDIUM_AND_ABOVE',
                },
                {
                    category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
                    threshold: 'BLOCK_MEDIUM_AND_ABOVE',
                },
                {
                    category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
                    threshold: 'BLOCK_MEDIUM_AND_ABOVE',
                },
                {
                    category: 'HARM_CATEGORY_HARASSMENT',
                    threshold: 'BLOCK_MEDIUM_AND_ABOVE',
                },
            ],
        });
    } catch (error) {
        // Surface the failure to the caller — a half-initialized module
        // would otherwise fail later with a confusing undefined-model error.
        console.error('Error initializing VertexAI:', error);
        throw error;
    }
}

// Build the shared model handle once at module load.
initializeGenerativeModel();

/**
 * Streams a Gemini response for `query` to the console.
 *
 * Fix for "No content is provided for sending chat message":
 * `sendMessageStream` expects the message content itself (a string or an
 * array of Parts), NOT a `{role, content}` object — the SDK found no
 * content in that shape. A `role: 'system'` chat message is also not
 * supported; the system prompt is passed via `systemInstruction` on
 * `startChat` instead. The original additionally fired a second,
 * un-consumed `sendMessageStream` call, which is removed.
 *
 * @param {string} query - user message to send to the model.
 */
async function gemini_1_0_pro(query) {
    try {
        const chat = generativeModel.startChat({
            systemInstruction: {
                role: 'system',
                parts: [{ text: 'You are an ai assistant' }],
            },
        });

        // Send the user's query directly; the SDK wraps it in a user turn.
        const result = await chat.sendMessageStream(query);

        // Print each streamed chunk as it arrives.
        for await (const item of result.stream) {
            const response = item.candidates[0].content.parts[0].text;
            console.log(response);
        }

        // The full aggregated response is available once the stream ends.
        const aggregatedResponse = await result.response;
        console.log(aggregatedResponse);
    } catch (error) {
        console.error('Error in gemini_1_0_pro:', error);
    }
}

// Public API: the streaming chat helper.
module.exports = { gemini_1_0_pro };


Solution

  • Okay, apologies for my misunderstanding.

    It appears you can either:

    const generativeModel = vertexAI.getGenerativeModel({
      ...
      systemInstruction: {
        role: "system",
        parts: [{text: "You are an AI assistant"}]
      }
    });
    

    Or

    const chat = generativeModel.startChat({
      systemInstruction: {
        role: "system",
        parts: [{text: "You are an AI assistant"}]
      }
    });
    

I found the Vertex AI Node.js SDK samples for system instructions useful:

And I've finally (!) found the `@google-cloud/vertexai` API reference for `getGenerativeModel` / `startChat`, which confirms that both accept a `systemInstruction` parameter: