OpenAI

Advanced AI capabilities including GPT chat completions, text embeddings, DALL-E image generation, and Whisper speech-to-text.
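
The code example below uses the official openai package for Node.js, installed with:

npm install openai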

Features

GPT-4 Chat Completions
Text Embeddings
DALL-E Image Generation
Whisper Speech-to-Text
Text-to-Speech (TTS)
Function Calling
Fine-tuning Support

Code Example

// OpenAI API Integration
const OpenAI = require('openai');

class OpenAIService {
  constructor(apiKey) {
    this.openai = new OpenAI({
      apiKey: apiKey
    });
  }

  // Generate text completion using GPT
  async generateCompletion(prompt, options = {}) {
    try {
      const completion = await this.openai.chat.completions.create({
        model: options.model || 'gpt-4',
        messages: [
          {
            role: 'system',
            content: options.systemMessage || 'You are a helpful assistant.'
          },
          {
            role: 'user',
            content: prompt
          }
        ],
        max_tokens: options.maxTokens || 150,
        temperature: options.temperature ?? 0.7,
        top_p: options.topP ?? 1,
        frequency_penalty: options.frequencyPenalty ?? 0,
        presence_penalty: options.presencePenalty ?? 0
      });

      return {
        success: true,
        text: completion.choices[0].message.content,
        usage: completion.usage,
        model: completion.model
      };
    } catch (error) {
      console.error('OpenAI Completion Error:', error);
      return {
        success: false,
        error: error.message
      };
    }
  }

  // Generate streaming completion
  async generateStreamingCompletion(prompt, onChunk, options = {}) {
    try {
      const stream = await this.openai.chat.completions.create({
        model: options.model || 'gpt-4',
        messages: [
          {
            role: 'system',
            content: options.systemMessage || 'You are a helpful assistant.'
          },
          {
            role: 'user',
            content: prompt
          }
        ],
        max_tokens: options.maxTokens || 150,
        temperature: options.temperature ?? 0.7,
        stream: true
      });

      let fullText = '';
      
      for await (const chunk of stream) {
        const content = chunk.choices[0]?.delta?.content || '';
        fullText += content;
        
        if (onChunk) {
          onChunk(content, fullText);
        }
      }

      return {
        success: true,
        text: fullText
      };
    } catch (error) {
      console.error('OpenAI Streaming Error:', error);
      return {
        success: false,
        error: error.message
      };
    }
  }

  // Generate text embeddings
  async generateEmbeddings(texts, options = {}) {
    try {
      const textsArray = Array.isArray(texts) ? texts : [texts];
      
      const response = await this.openai.embeddings.create({
        model: options.model || 'text-embedding-ada-002',
        input: textsArray
      });

      return {
        success: true,
        embeddings: response.data.map(item => item.embedding),
        usage: response.usage
      };
    } catch (error) {
      console.error('OpenAI Embeddings Error:', error);
      return {
        success: false,
        error: error.message
      };
    }
  }

  // Generate images with DALL-E
  async generateImage(prompt, options = {}) {
    try {
      const response = await this.openai.images.generate({
        model: options.model || 'dall-e-3',
        prompt: prompt,
        n: options.n || 1,
        size: options.size || '1024x1024',
        quality: options.quality || 'standard',
        style: options.style || 'vivid'
      });

      return {
        success: true,
        images: response.data.map(item => ({
          url: item.url,
          revised_prompt: item.revised_prompt
        }))
      };
    } catch (error) {
      console.error('OpenAI Image Generation Error:', error);
      return {
        success: false,
        error: error.message
      };
    }
  }

  // Transcribe audio with Whisper
  async transcribeAudio(audioFile, options = {}) {
    try {
      const transcription = await this.openai.audio.transcriptions.create({
        file: audioFile,
        model: options.model || 'whisper-1',
        language: options.language,
        prompt: options.prompt,
        response_format: options.responseFormat || 'json',
        temperature: options.temperature || 0
      });

      return {
        success: true,
        text: transcription.text,
        language: transcription.language // only set when responseFormat is 'verbose_json'
      };
    } catch (error) {
      console.error('OpenAI Transcription Error:', error);
      return {
        success: false,
        error: error.message
      };
    }
  }

  // Generate speech from text
  async generateSpeech(text, options = {}) {
    try {
      const mp3 = await this.openai.audio.speech.create({
        model: options.model || 'tts-1',
        voice: options.voice || 'alloy',
        input: text,
        response_format: options.responseFormat || 'mp3',
        speed: options.speed || 1.0
      });

      const buffer = Buffer.from(await mp3.arrayBuffer());
      
      return {
        success: true,
        audio: buffer
      };
    } catch (error) {
      console.error('OpenAI Speech Generation Error:', error);
      return {
        success: false,
        error: error.message
      };
    }
  }

  // Function calling with GPT
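  // Note: functions / function_call is OpenAI's original function-calling format;
  // newer API and SDK versions also accept the equivalent tools / tool_choice parameters.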
  async callFunction(prompt, functions, options = {}) {
    try {
      const completion = await this.openai.chat.completions.create({
        model: options.model || 'gpt-4',
        messages: [
          {
            role: 'system',
            content: options.systemMessage || 'You are a helpful assistant that can call functions.'
          },
          {
            role: 'user',
            content: prompt
          }
        ],
        functions: functions,
        function_call: options.functionCall || 'auto',
        temperature: options.temperature ?? 0.1
      });

      const message = completion.choices[0].message;

      if (message.function_call) {
        return {
          success: true,
          functionCall: {
            name: message.function_call.name,
            arguments: JSON.parse(message.function_call.arguments)
          },
          message: message.content,
          usage: completion.usage
        };
      }

      return {
        success: true,
        message: message.content,
        usage: completion.usage
      };
    } catch (error) {
      console.error('OpenAI Function Call Error:', error);
      return {
        success: false,
        error: error.message
      };
    }
  }

  // Chat with conversation history
  async chat(messages, options = {}) {
    try {
      const completion = await this.openai.chat.completions.create({
        model: options.model || 'gpt-4',
        messages: messages,
        max_tokens: options.maxTokens || 150,
        temperature: options.temperature ?? 0.7
      });

      const responseMessage = completion.choices[0].message;

      return {
        success: true,
        message: responseMessage,
        usage: completion.usage,
        model: completion.model
      };
    } catch (error) {
      console.error('OpenAI Chat Error:', error);
      return {
        success: false,
        error: error.message
      };
    }
  }
}

// Usage examples
const openai = new OpenAIService(process.env.OPENAI_API_KEY); // or pass an API key string directly

// Generate a completion
async function completionExample() {
  const result = await openai.generateCompletion(
    'Write a short story about a robot learning to paint.',
    {
      model: 'gpt-4',
      maxTokens: 200,
      temperature: 0.8
    }
  );

  if (result.success) {
    console.log('Generated text:', result.text);
    console.log('Tokens used:', result.usage);
  } else {
    console.error('Error:', result.error);
  }
}

// Generate embeddings
async function embeddingsExample() {
  const texts = [
    'The quick brown fox jumps over the lazy dog',
    'Machine learning is a subset of artificial intelligence',
    'JavaScript is a programming language'
  ];

  const result = await openai.generateEmbeddings(texts);

  if (result.success) {
    console.log('Embeddings generated:', result.embeddings.length);
    console.log('First embedding dimension:', result.embeddings[0].length);
  }
}

// Generate image
async function imageExample() {
  const result = await openai.generateImage(
    'A serene landscape with mountains and a lake at sunset',
    {
      size: '1024x1024',
      quality: 'hd',
      n: 1
    }
  );

  if (result.success) {
    console.log('Image URL:', result.images[0].url);
  }
}

// Streaming completion
async function streamingExample() {
  console.log('Streaming response:');
  
  const result = await openai.generateStreamingCompletion(
    'Explain quantum computing in simple terms.',
    (chunk, fullText) => {
      process.stdout.write(chunk);
    },
    {
      model: 'gpt-4',
      maxTokens: 200
    }
  );

  console.log('\nStreaming complete:', result.success);
}

// Function calling example
async function functionCallExample() {
  const functions = [
    {
      name: 'get_weather',
      description: 'Get the current weather for a location',
      parameters: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'The city and state, e.g. San Francisco, CA'
          }
        },
        required: ['location']
      }
    }
  ];

  const result = await openai.callFunction(
    "What's the weather like in New York?",
    functions
  );

  if (result.success && result.functionCall) {
    console.log('Function to call:', result.functionCall.name);
    console.log('Arguments:', result.functionCall.arguments);
  }
}
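
// Audio usage example: transcribe a local file with Whisper, then read the
// transcript back with text-to-speech. 'sample-audio.mp3' and the output path
// are placeholders; point them at real files before running.
async function audioExample() {
  const fs = require('fs');

  const transcription = await openai.transcribeAudio(
    fs.createReadStream('sample-audio.mp3')
  );

  if (transcription.success) {
    console.log('Transcript:', transcription.text);

    const speech = await openai.generateSpeech(transcription.text, {
      voice: 'nova'
    });

    if (speech.success) {
      fs.writeFileSync('speech-output.mp3', speech.audio);
      console.log('Speech saved to speech-output.mp3');
    }
  }
}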

completionExample();
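
// Fine-tuning is listed under Features but not wrapped by the service class.
// A minimal sketch using the same SDK client: upload a JSONL training file,
// then create a fine-tuning job. 'training-data.jsonl' and the base model are
// placeholders to adapt to your own data.
async function fineTuningExample() {
  const fs = require('fs');
  const client = openai.openai; // reuse the underlying OpenAI client

  const trainingFile = await client.files.create({
    file: fs.createReadStream('training-data.jsonl'),
    purpose: 'fine-tune'
  });

  const job = await client.fineTuning.jobs.create({
    training_file: trainingFile.id,
    model: 'gpt-3.5-turbo'
  });

  console.log('Fine-tuning job created:', job.id, 'status:', job.status);
}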