Quick Start
Create a new React Native AI chat app:
# Create the project
npx create-expo-app my-ai-chat --template blank-typescript
cd my-ai-chat

# Core dependencies
npm install axios react-native-reanimated zustand
npm install @react-native-voice/voice expo-speech

# Also used by later sections of this guide
npx expo install expo-file-system expo-image-picker expo-image-manipulator expo-notifications expo-device @react-native-async-storage/async-storage @react-native-community/netinfo
npm install @react-navigation/native @react-navigation/bottom-tabs

# Note: @react-native-voice/voice includes native code, so run the app in a
# development build (npx expo run:ios / run:android, or EAS), not Expo Go
1. Project Setup & Architecture
Environment Configuration
// app.config.ts
export default {
expo: {
name: "AI Chat App",
slug: "ai-chat-app",
version: "1.0.0",
orientation: "portrait",
icon: "./assets/icon.png",
userInterfaceStyle: "automatic",
splash: {
image: "./assets/splash.png",
resizeMode: "contain",
backgroundColor: "#000000"
},
ios: {
supportsTablet: true,
bundleIdentifier: "com.yourcompany.aichat",
infoPlist: {
NSMicrophoneUsageDescription: "This app uses the microphone for voice input",
NSPhotoLibraryUsageDescription: "This app accesses photos for image analysis"
}
},
android: {
adaptiveIcon: {
foregroundImage: "./assets/adaptive-icon.png",
backgroundColor: "#000000"
},
package: "com.yourcompany.aichat",
permissions: ["RECORD_AUDIO", "READ_EXTERNAL_STORAGE"]
},
extra: {
eas: {
projectId: "your-project-id"
}
}
}
}
Project Structure
src/
├── api/
│ ├── client.ts # API client configuration
│ ├── llm.ts # LLM service abstraction
│ └── types.ts # TypeScript types
├── components/
│ ├── ChatMessage.tsx # Message bubble component
│ ├── ChatInput.tsx # Input with voice button
│ ├── StreamingText.tsx # Animated streaming text
│ └── ImagePicker.tsx # Image selection component
├── screens/
│ ├── ChatScreen.tsx # Main chat interface
│ ├── SettingsScreen.tsx # API key management
│ └── HistoryScreen.tsx # Conversation history
├── store/
│ ├── chatStore.ts # Zustand chat state
│ └── settingsStore.ts # Settings persistence
├── services/
│ ├── voice.ts # Voice I/O service
│ ├── storage.ts # Offline storage
│ └── notifications.ts # Push notifications
└── utils/
├── constants.ts # App constants
└── helpers.ts # Utility functions
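The snippets below import Message and Conversation from src/api/types.ts, which the guide doesn't otherwise show. Here is a minimal sketch of those types, with fields inferred from how the later sections use them:
// src/api/types.ts (sketch; fields inferred from usage in later sections)
export type Role = 'user' | 'assistant' | 'system'

export interface Message {
  id: string
  role: Role
  content: string
  image?: string          // local URI or data URL for multimodal messages
  timestamp: Date
  isStreaming?: boolean   // true while tokens are still arriving
  isCached?: boolean      // true when served from the offline cache
}

export interface Conversation {
  id: string
  title: string
  createdAt: Date
  updatedAt: Date
  messages: Message[]
}
API Client Setup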
// src/api/client.ts
import axios from 'axios'
import { getSettingsStore } from '../store/settingsStore'
const API_BASE_URL = 'https://api.parrotrouter.com/v1'
export const apiClient = axios.create({
baseURL: API_BASE_URL,
timeout: 30000,
headers: {
'Content-Type': 'application/json',
},
})
// Add auth interceptor
apiClient.interceptors.request.use((config) => {
const apiKey = getSettingsStore().apiKey
if (apiKey) {
config.headers.Authorization = `Bearer ${apiKey}`
}
return config
})
// Add response interceptor for error handling
apiClient.interceptors.response.use(
(response) => response,
async (error) => {
if (error.response?.status === 401) {
// Handle auth error
console.error('Authentication failed')
}
return Promise.reject(error)
}
)
2. Building the Chat Interface
Main Chat Screen
// src/screens/ChatScreen.tsx
import React, { useRef, useEffect } from 'react'
import {
View,
FlatList,
KeyboardAvoidingView,
Platform,
StyleSheet,
SafeAreaView,
} from 'react-native'
import { useChatStore } from '../store/chatStore'
import ChatMessage from '../components/ChatMessage'
import ChatInput from '../components/ChatInput'
import { Message } from '../api/types'
export default function ChatScreen() {
const { messages, sendMessage, isLoading } = useChatStore()
const flatListRef = useRef<FlatList>(null)
useEffect(() => {
// Scroll to bottom when new message arrives
if (messages.length > 0) {
flatListRef.current?.scrollToEnd({ animated: true })
}
}, [messages])
const renderMessage = ({ item }: { item: Message }) => (
<ChatMessage message={item} />
)
return (
<SafeAreaView style={styles.container}>
<KeyboardAvoidingView
style={styles.container}
behavior={Platform.OS === 'ios' ? 'padding' : 'height'}
keyboardVerticalOffset={90}
>
<FlatList
ref={flatListRef}
data={messages}
renderItem={renderMessage}
keyExtractor={(item) => item.id}
contentContainerStyle={styles.messagesList}
onContentSizeChange={() => flatListRef.current?.scrollToEnd()}
/>
<ChatInput onSend={sendMessage} isLoading={isLoading} />
</KeyboardAvoidingView>
</SafeAreaView>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
backgroundColor: '#000',
},
messagesList: {
paddingHorizontal: 16,
paddingBottom: 16,
},
})
Message Component with Streaming
// src/components/ChatMessage.tsx
import React from 'react'
import { View, Text, Image, StyleSheet } from 'react-native'
import { Message } from '../api/types'
import StreamingText from './StreamingText'
interface Props {
message: Message
}
export default function ChatMessage({ message }: Props) {
const isUser = message.role === 'user'
return (
<View style={[
styles.container,
isUser ? styles.userMessage : styles.assistantMessage
]}>
{message.isStreaming ? (
<StreamingText text={message.content} />
) : (
<Text style={[
styles.text,
isUser ? styles.userText : styles.assistantText
]}>
{message.content}
</Text>
)}
{message.image && (
<Image source={{ uri: message.image }} style={styles.image} />
)}
</View>
)
}
const styles = StyleSheet.create({
container: {
maxWidth: '80%',
marginVertical: 4,
padding: 12,
borderRadius: 16,
},
userMessage: {
alignSelf: 'flex-end',
backgroundColor: '#0084ff',
},
assistantMessage: {
alignSelf: 'flex-start',
backgroundColor: '#333',
},
text: {
fontSize: 16,
lineHeight: 22,
},
userText: {
color: '#fff',
},
assistantText: {
color: '#fff',
},
image: {
width: 200,
height: 200,
borderRadius: 8,
marginTop: 8,
},
})
3. Streaming Text Responses
LLM Service with Streaming
// src/api/llm.ts
import { apiClient } from './client'
import { getSettingsStore } from '../store/settingsStore'
import { Message } from './types'
export interface StreamCallbacks {
onToken: (token: string) => void
onComplete: () => void
onError: (error: Error) => void
}
export class LLMService {
async streamChat(
messages: Message[],
callbacks: StreamCallbacks,
options: {
model?: string
temperature?: number
maxTokens?: number
} = {}
) {
try {
const response = await fetch(`${apiClient.defaults.baseURL}/chat/stream`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${getSettingsStore().apiKey}`, // axios interceptors don't apply to fetch, so attach the key here
},
body: JSON.stringify({
messages: messages.map(m => ({
role: m.role,
content: m.content,
})),
model: options.model || 'gpt-3.5-turbo',
temperature: options.temperature || 0.7,
max_tokens: options.maxTokens || 1000,
stream: true,
}),
})
// Note: React Native's built-in fetch doesn't expose response.body as a
// ReadableStream; use Expo's streaming-capable fetch (expo/fetch) or a
// polyfill such as react-native-fetch-api for this to work.
if (!response.body) {
throw new Error('No response body: streaming fetch is unsupported in this environment')
}
const reader = response.body.getReader()
const decoder = new TextDecoder()
let buffer = ''
while (true) {
const { done, value } = await reader.read()
if (done) break
buffer += decoder.decode(value, { stream: true })
const lines = buffer.split('\n')
buffer = lines.pop() || ''
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6)
if (data === '[DONE]') {
callbacks.onComplete()
return
}
try {
const parsed = JSON.parse(data)
const token = parsed.choices?.[0]?.delta?.content
if (token) {
callbacks.onToken(token)
}
} catch (e) {
console.error('Failed to parse stream data:', e)
}
}
}
}
} catch (error) {
callbacks.onError(error as Error)
}
}
// Non-streaming fallback
async chat(messages: Message[], options: any = {}) {
const response = await apiClient.post('/chat', {
messages,
...options,
})
return response.data
}
}
Streaming Text Component
// src/components/StreamingText.tsx
import React, { useEffect, useState } from 'react'
import { Text, Animated } from 'react-native'
interface Props {
text: string
style?: any
}
export default function StreamingText({ text, style }: Props) {
const [displayText, setDisplayText] = useState('')
const [cursorOpacity] = useState(new Animated.Value(1))
useEffect(() => {
// Cursor blinking animation
Animated.loop(
Animated.sequence([
Animated.timing(cursorOpacity, {
toValue: 0,
duration: 500,
useNativeDriver: true,
}),
Animated.timing(cursorOpacity, {
toValue: 1,
duration: 500,
useNativeDriver: true,
}),
])
).start()
}, [])
useEffect(() => {
// Simulate typing effect
if (displayText.length < text.length) {
const timer = setTimeout(() => {
setDisplayText(text.slice(0, displayText.length + 1))
}, 10)
return () => clearTimeout(timer)
}
}, [displayText, text])
return (
<Text style={style}>
{displayText}
<Animated.Text style={{ opacity: cursorOpacity }}>▊</Animated.Text>
</Text>
)
}
4. Offline Support & Caching
Storage Service
// src/services/storage.ts
import AsyncStorage from '@react-native-async-storage/async-storage'
import * as FileSystem from 'expo-file-system'
import { Message, Conversation } from '../api/types'
const CONVERSATIONS_KEY = '@conversations'
const CACHE_DIR = FileSystem.documentDirectory + 'cache/'
export class StorageService {
// Initialize cache directory
async init() {
const dirInfo = await FileSystem.getInfoAsync(CACHE_DIR)
if (!dirInfo.exists) {
await FileSystem.makeDirectoryAsync(CACHE_DIR, { intermediates: true })
}
}
// Save conversation
async saveConversation(conversation: Conversation) {
try {
const conversations = await this.getConversations()
const index = conversations.findIndex(c => c.id === conversation.id)
if (index >= 0) {
conversations[index] = conversation
} else {
conversations.push(conversation)
}
await AsyncStorage.setItem(CONVERSATIONS_KEY, JSON.stringify(conversations))
} catch (error) {
console.error('Failed to save conversation:', error)
}
}
// Get all conversations
async getConversations(): Promise<Conversation[]> {
try {
const data = await AsyncStorage.getItem(CONVERSATIONS_KEY)
return data ? JSON.parse(data) : []
} catch (error) {
console.error('Failed to get conversations:', error)
return []
}
}
// Cache response for offline access
async cacheResponse(prompt: string, response: string) {
const hash = await this.hashString(prompt)
const filePath = CACHE_DIR + hash + '.json'
await FileSystem.writeAsStringAsync(filePath, JSON.stringify({
prompt,
response,
timestamp: Date.now(),
}))
}
// Get cached response
async getCachedResponse(prompt: string): Promise<string | null> {
const hash = await this.hashString(prompt)
const filePath = CACHE_DIR + hash + '.json'
try {
const fileInfo = await FileSystem.getInfoAsync(filePath)
if (!fileInfo.exists) return null
const data = await FileSystem.readAsStringAsync(filePath)
const cached = JSON.parse(data)
// Check if cache is still valid (24 hours)
if (Date.now() - cached.timestamp > 24 * 60 * 60 * 1000) {
await FileSystem.deleteAsync(filePath)
return null
}
return cached.response
} catch (error) {
return null
}
}
// Simple hash function for cache keys
private async hashString(str: string): Promise<string> {
let hash = 0
for (let i = 0; i < str.length; i++) {
const char = str.charCodeAt(i)
hash = ((hash << 5) - hash) + char
hash = hash & hash // Convert to 32-bit integer
}
return Math.abs(hash).toString(36)
}
// Clear old cache files
async clearOldCache(daysOld: number = 7) {
const files = await FileSystem.readDirectoryAsync(CACHE_DIR)
const cutoff = Date.now() - (daysOld * 24 * 60 * 60 * 1000)
for (const file of files) {
const filePath = CACHE_DIR + file
const info = await FileSystem.getInfoAsync(filePath)
// modificationTime is in seconds since epoch; convert to ms before comparing
if (info.modificationTime && info.modificationTime * 1000 < cutoff) {
await FileSystem.deleteAsync(filePath)
}
}
}
}
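hashString above is a fast, non-cryptographic hash, so distinct prompts can occasionally collide on the same cache key. If that matters, a real digest via expo-crypto is a drop-in alternative (a sketch, assuming npx expo install expo-crypto):
// Collision-resistant cache keys with expo-crypto (sketch)
import * as Crypto from 'expo-crypto'

async function hashPrompt(prompt: string): Promise<string> {
  // Returns a SHA-256 hex digest of the prompt
  return Crypto.digestStringAsync(Crypto.CryptoDigestAlgorithm.SHA256, prompt)
}
Offline-First Chat Store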
// src/store/chatStore.ts
import { create } from 'zustand'
import { persist, createJSONStorage } from 'zustand/middleware'
import AsyncStorage from '@react-native-async-storage/async-storage'
import { Message, Conversation } from '../api/types'
import { LLMService } from '../api/llm'
import { StorageService } from '../services/storage'
interface ChatState {
conversations: Conversation[]
currentConversation: Conversation | null
messages: Message[]
isLoading: boolean
isOnline: boolean
sendMessage: (content: string, image?: string) => Promise<void>
loadConversation: (id: string) => void
createNewConversation: () => void
setOnlineStatus: (status: boolean) => void
}
const llmService = new LLMService()
const storageService = new StorageService()
export const useChatStore = create<ChatState>()(
persist(
(set, get) => ({
conversations: [],
currentConversation: null,
messages: [],
isLoading: false,
isOnline: true,
sendMessage: async (content: string, image?: string) => {
const { messages, isOnline } = get()
// Add user message
const userMessage: Message = {
id: Date.now().toString(),
role: 'user',
content,
image,
timestamp: new Date(),
}
set({
messages: [...messages, userMessage],
isLoading: true,
})
// Check cache first if offline
if (!isOnline) {
const cachedResponse = await storageService.getCachedResponse(content)
if (cachedResponse) {
const assistantMessage: Message = {
id: (Date.now() + 1).toString(),
role: 'assistant',
content: cachedResponse,
timestamp: new Date(),
isCached: true,
}
set({
messages: [...get().messages, assistantMessage],
isLoading: false,
})
return
}
}
// Create assistant message for streaming
const assistantMessage: Message = {
id: (Date.now() + 1).toString(),
role: 'assistant',
content: '',
timestamp: new Date(),
isStreaming: true,
}
set({
messages: [...get().messages, assistantMessage],
})
// Stream response
try {
await llmService.streamChat(
[...messages, userMessage],
{
onToken: (token) => {
set((state) => ({
messages: state.messages.map(m =>
m.id === assistantMessage.id
? { ...m, content: m.content + token }
: m
),
}))
},
onComplete: async () => {
const finalMessages = get().messages
const finalMessage = finalMessages.find(m => m.id === assistantMessage.id)
if (finalMessage) {
// Cache the response
await storageService.cacheResponse(content, finalMessage.content)
set({
messages: finalMessages.map(m =>
m.id === assistantMessage.id
? { ...m, isStreaming: false }
: m
),
isLoading: false,
})
}
},
onError: (error) => {
console.error('Stream error:', error)
set({
messages: get().messages.map(m =>
m.id === assistantMessage.id
? { ...m, content: 'Error: Failed to get response', isStreaming: false }
: m
),
isLoading: false,
})
},
}
)
} catch (error) {
console.error('Chat error:', error)
set({ isLoading: false })
}
},
loadConversation: (id: string) => {
const conversation = get().conversations.find(c => c.id === id)
if (conversation) {
set({
currentConversation: conversation,
messages: conversation.messages || [],
})
}
},
createNewConversation: () => {
const newConversation: Conversation = {
id: Date.now().toString(),
title: 'New Chat',
createdAt: new Date(),
updatedAt: new Date(),
messages: [],
}
set({
conversations: [...get().conversations, newConversation],
currentConversation: newConversation,
messages: [],
})
},
setOnlineStatus: (status: boolean) => {
set({ isOnline: status })
},
}),
{
name: 'chat-storage',
storage: createJSONStorage(() => AsyncStorage),
}
)
)
5. Voice Input/Output Integration
Voice Service
// src/services/voice.ts
import Voice from '@react-native-voice/voice'
import * as Speech from 'expo-speech'
export class VoiceService {
private isListening = false
private onResult: ((text: string) => void) | null = null
constructor() {
this.setupVoiceRecognition()
}
private setupVoiceRecognition() {
Voice.onSpeechStart = this.onSpeechStart
Voice.onSpeechEnd = this.onSpeechEnd
Voice.onSpeechResults = this.onSpeechResults
Voice.onSpeechError = this.onSpeechError
}
private onSpeechStart = () => {
console.log('Speech recognition started')
}
private onSpeechEnd = () => {
this.isListening = false
console.log('Speech recognition ended')
}
private onSpeechResults = (event: any) => {
if (event.value && event.value.length > 0) {
const text = event.value[0]
this.onResult?.(text)
}
}
private onSpeechError = (error: any) => {
console.error('Speech recognition error:', error)
this.isListening = false
}
async startListening(onResult: (text: string) => void) {
try {
this.onResult = onResult
this.isListening = true
await Voice.start('en-US')
} catch (error) {
console.error('Failed to start voice recognition:', error)
this.isListening = false
}
}
async stopListening() {
try {
await Voice.stop()
this.isListening = false
} catch (error) {
console.error('Failed to stop voice recognition:', error)
}
}
async speak(text: string, options?: Speech.SpeechOptions) {
const defaultOptions: Speech.SpeechOptions = {
language: 'en-US',
pitch: 1.0,
rate: 1.0,
...options,
}
await Speech.speak(text, defaultOptions)
}
async stopSpeaking() {
await Speech.stop()
}
async isSpeaking(): Promise<boolean> {
return await Speech.isSpeakingAsync()
}
destroy() {
Voice.destroy().then(Voice.removeAllListeners)
}
}
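One caveat before wiring voice into the UI: on Android, RECORD_AUDIO is a runtime permission, so declaring it in app.config.ts is not enough, and Voice.start() will fail until the user grants it. A minimal pre-flight check using React Native's built-in PermissionsAndroid, to call before startListening:
// Request the microphone permission at runtime on Android (sketch)
import { PermissionsAndroid, Platform } from 'react-native'

export async function ensureMicPermission(): Promise<boolean> {
  // iOS prompts automatically via NSMicrophoneUsageDescription
  if (Platform.OS !== 'android') return true
  const result = await PermissionsAndroid.request(
    PermissionsAndroid.PERMISSIONS.RECORD_AUDIO,
    {
      title: 'Microphone Permission',
      message: 'Voice input needs access to your microphone.',
      buttonPositive: 'OK',
    }
  )
  return result === PermissionsAndroid.RESULTS.GRANTED
}
Voice-Enabled Chat Input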
// src/components/ChatInput.tsx
import React, { useState, useRef } from 'react'
import {
View,
TextInput,
TouchableOpacity,
StyleSheet,
Animated,
Platform,
Image,
} from 'react-native'
import { Ionicons } from '@expo/vector-icons'
import { VoiceService } from '../services/voice'
import { ImagePickerService } from '../services/imagePicker'
interface Props {
onSend: (message: string, image?: string) => void
isLoading: boolean
}
const voiceService = new VoiceService()
const imagePickerService = new ImagePickerService()
export default function ChatInput({ onSend, isLoading }: Props) {
const [text, setText] = useState('')
const [isRecording, setIsRecording] = useState(false)
const [selectedImage, setSelectedImage] = useState<string>()
const pulseAnim = useRef(new Animated.Value(1)).current
const startRecording = async () => {
setIsRecording(true)
// Start pulse animation
Animated.loop(
Animated.sequence([
Animated.timing(pulseAnim, {
toValue: 1.2,
duration: 500,
useNativeDriver: true,
}),
Animated.timing(pulseAnim, {
toValue: 1,
duration: 500,
useNativeDriver: true,
}),
])
).start()
await voiceService.startListening((result) => {
setText(result)
stopRecording()
})
}
const stopRecording = async () => {
setIsRecording(false)
pulseAnim.stopAnimation()
await voiceService.stopListening()
}
const handleSend = () => {
if (text.trim() || selectedImage) {
onSend(text.trim(), selectedImage)
setText('')
setSelectedImage(undefined)
}
}
const pickImage = async () => {
const result = await imagePickerService.pickImage()
if (result) {
setSelectedImage(result)
}
}
return (
<View style={styles.container}>
{selectedImage && (
<View style={styles.imagePreview}>
<Image source={{ uri: selectedImage }} style={styles.previewImage} />
<TouchableOpacity
onPress={() => setSelectedImage(undefined)}
style={styles.removeImage}
>
<Ionicons name="close-circle" size={24} color="#fff" />
</TouchableOpacity>
</View>
)}
<View style={styles.inputRow}>
<TouchableOpacity onPress={pickImage} style={styles.iconButton}>
<Ionicons name="image-outline" size={24} color="#999" />
</TouchableOpacity>
<TextInput
style={styles.input}
value={text}
onChangeText={setText}
placeholder="Type a message..."
placeholderTextColor="#666"
multiline
editable={!isRecording}
/>
{text.trim() || selectedImage ? (
<TouchableOpacity
onPress={handleSend}
style={styles.sendButton}
disabled={isLoading}
>
<Ionicons
name="send"
size={24}
color={isLoading ? "#666" : "#0084ff"}
/>
</TouchableOpacity>
) : (
<Animated.View style={{ transform: [{ scale: pulseAnim }] }}>
<TouchableOpacity
onPress={isRecording ? stopRecording : startRecording}
style={[styles.voiceButton, isRecording && styles.recording]}
>
<Ionicons
name="mic"
size={24}
color={isRecording ? "#ff0000" : "#999"}
/>
</TouchableOpacity>
</Animated.View>
)}
</View>
</View>
)
}
const styles = StyleSheet.create({
container: {
borderTopWidth: 1,
borderTopColor: '#333',
paddingTop: 8,
paddingHorizontal: 16,
paddingBottom: Platform.OS === 'ios' ? 20 : 8,
},
inputRow: {
flexDirection: 'row',
alignItems: 'flex-end',
},
input: {
flex: 1,
backgroundColor: '#1a1a1a',
borderRadius: 20,
paddingHorizontal: 16,
paddingVertical: 8,
marginHorizontal: 8,
maxHeight: 100,
color: '#fff',
fontSize: 16,
},
iconButton: {
padding: 8,
},
sendButton: {
padding: 8,
},
voiceButton: {
padding: 8,
},
recording: {
backgroundColor: 'rgba(255, 0, 0, 0.1)',
borderRadius: 20,
},
imagePreview: {
marginBottom: 8,
position: 'relative',
},
previewImage: {
width: 100,
height: 100,
borderRadius: 8,
},
removeImage: {
position: 'absolute',
top: -8,
right: -8,
},
})
6. Image Upload for Multimodal
Image Picker Service
// src/services/imagePicker.ts
import * as ImagePicker from 'expo-image-picker'
import * as FileSystem from 'expo-file-system'
import { manipulateAsync, SaveFormat } from 'expo-image-manipulator'
export class ImagePickerService {
async pickImage(): Promise<string | null> {
// Request permission
const { status } = await ImagePicker.requestMediaLibraryPermissionsAsync()
if (status !== 'granted') {
alert('Sorry, we need camera roll permissions to upload images!')
return null
}
const result = await ImagePicker.launchImageLibraryAsync({
mediaTypes: ImagePicker.MediaTypeOptions.Images, // newer SDKs accept ['images'] instead
allowsEditing: true,
aspect: [4, 3],
quality: 0.8,
})
if (!result.canceled && result.assets[0]) {
// Compress image
const compressed = await this.compressImage(result.assets[0].uri)
return compressed
}
return null
}
async takePhoto(): Promise<string | null> {
// Request permission
const { status } = await ImagePicker.requestCameraPermissionsAsync()
if (status !== 'granted') {
alert('Sorry, we need camera permissions to take photos!')
return null
}
const result = await ImagePicker.launchCameraAsync({
allowsEditing: true,
aspect: [4, 3],
quality: 0.8,
})
if (!result.canceled && result.assets[0]) {
const compressed = await this.compressImage(result.assets[0].uri)
return compressed
}
return null
}
private async compressImage(uri: string): Promise<string> {
const manipResult = await manipulateAsync(
uri,
[{ resize: { width: 1024 } }],
{ compress: 0.8, format: SaveFormat.JPEG }
)
return manipResult.uri
}
async convertToBase64(uri: string): Promise<string> {
const base64 = await FileSystem.readAsStringAsync(uri, {
encoding: FileSystem.EncodingType.Base64,
})
return `data:image/jpeg;base64,${base64}`
}
}
Multimodal Tips
- Compress images before uploading to reduce bandwidth
- Show upload progress for better UX
- Cache processed images locally
- Handle network failures gracefully (see the multimodal request sketch below)
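Putting the pieces together, the sketch below sends a picked image plus a prompt as a single multimodal request. The content-array payload follows the widely used OpenAI-style vision format; that shape is an assumption here, so confirm what your provider expects:
// src/services/multimodal.ts (sketch; payload shape assumed, verify with your provider)
import { apiClient } from '../api/client'
import { ImagePickerService } from './imagePicker'

const picker = new ImagePickerService()

export async function askAboutImage(prompt: string, imageUri: string) {
  // Reuses the compression + base64 helpers from the service above
  const dataUrl = await picker.convertToBase64(imageUri)
  const response = await apiClient.post('/chat', {
    model: 'gpt-4o', // assumption: any vision-capable model id
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: prompt },
          { type: 'image_url', image_url: { url: dataUrl } },
        ],
      },
    ],
  })
  return response.data
}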
7. Push Notifications
Notification Service
// src/services/notifications.ts
import * as Notifications from 'expo-notifications'
import * as Device from 'expo-device'
import { Platform } from 'react-native'
Notifications.setNotificationHandler({
handleNotification: async () => ({
shouldShowAlert: true,
shouldPlaySound: true,
shouldSetBadge: false,
}),
})
export class NotificationService {
private expoPushToken: string | null = null
async registerForPushNotifications() {
if (!Device.isDevice) {
console.log('Must use physical device for Push Notifications')
return null
}
const { status: existingStatus } = await Notifications.getPermissionsAsync()
let finalStatus = existingStatus
if (existingStatus !== 'granted') {
const { status } = await Notifications.requestPermissionsAsync()
finalStatus = status
}
if (finalStatus !== 'granted') {
console.log('Failed to get push token for push notification!')
return null
}
// Note: recent Expo SDKs expect the EAS projectId here, e.g.
// getExpoPushTokenAsync({ projectId: 'your-project-id' })
const token = (await Notifications.getExpoPushTokenAsync()).data
this.expoPushToken = token
if (Platform.OS === 'android') {
Notifications.setNotificationChannelAsync('default', {
name: 'default',
importance: Notifications.AndroidImportance.MAX,
vibrationPattern: [0, 250, 250, 250],
lightColor: '#FF231F7C',
})
}
return token
}
async scheduleNotification(title: string, body: string, data?: any) {
await Notifications.scheduleNotificationAsync({
content: {
title,
body,
data,
},
trigger: { seconds: 1 },
})
}
async scheduleAsyncResponseNotification(taskId: string) {
await this.scheduleNotification(
'AI Response Ready',
'Your AI assistant has finished processing your request',
{ taskId }
)
}
addNotificationReceivedListener(
listener: (notification: Notifications.Notification) => void
) {
return Notifications.addNotificationReceivedListener(listener)
}
addNotificationResponseListener(
listener: (response: Notifications.NotificationResponse) => void
) {
return Notifications.addNotificationResponseReceivedListener(listener)
}
}
8. State Management with Zustand
Settings Store
// src/store/settingsStore.ts
import { create } from 'zustand'
import { persist, createJSONStorage } from 'zustand/middleware'
import AsyncStorage from '@react-native-async-storage/async-storage'
interface SettingsState {
apiKey: string
provider: 'openai' | 'anthropic' | 'parrotrouter'
model: string
temperature: number
maxTokens: number
voiceEnabled: boolean
autoSpeak: boolean
theme: 'light' | 'dark' | 'auto'
setApiKey: (key: string) => void
setProvider: (provider: string) => void
setModel: (model: string) => void
setTemperature: (temp: number) => void
setMaxTokens: (tokens: number) => void
toggleVoice: () => void
toggleAutoSpeak: () => void
setTheme: (theme: 'light' | 'dark' | 'auto') => void
}
export const useSettingsStore = create<SettingsState>()(
persist(
(set) => ({
apiKey: '',
provider: 'parrotrouter',
model: 'gpt-3.5-turbo',
temperature: 0.7,
maxTokens: 1000,
voiceEnabled: true,
autoSpeak: false,
theme: 'auto',
setApiKey: (key) => set({ apiKey: key }),
setProvider: (provider) => set({ provider: provider as any }),
setModel: (model) => set({ model }),
setTemperature: (temp) => set({ temperature: temp }),
setMaxTokens: (tokens) => set({ maxTokens: tokens }),
toggleVoice: () => set((state) => ({ voiceEnabled: !state.voiceEnabled })),
toggleAutoSpeak: () => set((state) => ({ autoSpeak: !state.autoSpeak })),
setTheme: (theme) => set({ theme }),
}),
{
name: 'settings-storage',
storage: createJSONStorage(() => AsyncStorage),
}
)
)
// Helper to get settings outside of React
export const getSettingsStore = () => useSettingsStore.getState()
9. Performance Optimization
Optimized Message List
// src/components/OptimizedMessageList.tsx
import React, { memo, useCallback } from 'react'
import { FlatList, View, Text, StyleSheet } from 'react-native'
import { Message } from '../api/types'
import ChatMessage from './ChatMessage'
interface Props {
messages: Message[]
onEndReached?: () => void
}
const OptimizedMessageList = memo(({ messages, onEndReached }: Props) => {
const keyExtractor = useCallback((item: Message) => item.id, [])
const renderItem = useCallback(({ item }: { item: Message }) => (
<ChatMessage message={item} />
), [])
const getItemLayout = useCallback((data: any, index: number) => ({
length: 80, // Estimated row height; getItemLayout assumes fixed-height rows, so drop it if heights vary
offset: 80 * index,
index,
}), [])
const ItemSeparator = memo(() => <View style={styles.separator} />)
return (
<FlatList
data={messages}
renderItem={renderItem}
keyExtractor={keyExtractor}
getItemLayout={getItemLayout}
ItemSeparatorComponent={ItemSeparator}
onEndReached={onEndReached}
onEndReachedThreshold={0.5}
removeClippedSubviews={true}
maxToRenderPerBatch={10}
updateCellsBatchingPeriod={50}
windowSize={10}
initialNumToRender={10}
maintainVisibleContentPosition={{
minIndexForVisible: 0,
}}
/>
)
})
const styles = StyleSheet.create({
separator: {
height: 8,
},
})
export default OptimizedMessageList
Performance Monitoring
// src/utils/performance.ts
import { InteractionManager } from 'react-native'
export class PerformanceMonitor {
private marks = new Map<string, number>()
mark(name: string) {
this.marks.set(name, performance.now())
}
measure(name: string, startMark: string) {
const start = this.marks.get(startMark)
if (!start) {
console.warn(`No mark found for ${startMark}`)
return
}
const duration = performance.now() - start
console.log(`Performance: ${name} took ${duration.toFixed(2)}ms`)
// Send to analytics if needed
this.reportMetric(name, duration)
}
private reportMetric(name: string, duration: number) {
// Implement analytics reporting
}
// Debounce helper for expensive operations
static debounce<T extends (...args: any[]) => any>(
func: T,
wait: number
): (...args: Parameters<T>) => void {
let timeout: ReturnType<typeof setTimeout>
return (...args: Parameters<T>) => {
clearTimeout(timeout)
timeout = setTimeout(() => func(...args), wait)
}
}
// Run after interactions
static runAfterInteractions(callback: () => void) {
InteractionManager.runAfterInteractions(callback)
}
}
Performance Best Practices
- Use React.memo for expensive components
- Implement virtualized lists for long conversations
- Debounce user input and API calls (see the example below)
- Lazy load images and heavy components
- Use InteractionManager for non-critical updates
- Profile with Flipper or React DevTools
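As a concrete example, the PerformanceMonitor.debounce helper above can wrap anything you don't want firing on every keystroke (fetchSuggestions below is a hypothetical API call):
// Debouncing a per-keystroke API call (sketch)
import { PerformanceMonitor } from '../utils/performance'

// Hypothetical helper; substitute any call you make as the user types
async function fetchSuggestions(query: string) { /* ... */ }

// Collapses rapid keystrokes into at most one call per 300 ms pause
const debouncedFetch = PerformanceMonitor.debounce(fetchSuggestions, 300)

// In a component: <TextInput onChangeText={(text) => debouncedFetch(text)} />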
10. App Store Deployment
iOS Configuration
// app.json iOS configuration
{
"expo": {
"ios": {
"bundleIdentifier": "com.yourcompany.aichat",
"buildNumber": "1.0.0",
"supportsTablet": true,
"usesAppleSignIn": false,
"config": {
"usesNonExemptEncryption": false
},
"infoPlist": {
"NSMicrophoneUsageDescription": "This app uses the microphone for voice input to send messages to your AI assistant.",
"NSPhotoLibraryUsageDescription": "This app accesses your photo library to send images for AI analysis.",
"NSCameraUsageDescription": "This app uses the camera to take photos for AI analysis.",
"ITSAppUsesNonExemptEncryption": false
}
}
}
}
Android Configuration
// app.json Android configuration
{
"expo": {
"android": {
"package": "com.yourcompany.aichat",
"versionCode": 1,
"adaptiveIcon": {
"foregroundImage": "./assets/adaptive-icon.png",
"backgroundColor": "#000000"
},
"permissions": [
"RECORD_AUDIO",
"READ_EXTERNAL_STORAGE",
"WRITE_EXTERNAL_STORAGE",
"CAMERA",
"VIBRATE"
],
"config": {
"googleMobileAdsAppId": "ca-app-pub-xxxxx"
}
}
}
}
Build & Submit Process
# Install EAS CLI
npm install -g eas-cli

# Configure EAS
eas build:configure

# Build for iOS
eas build --platform ios

# Build for Android
eas build --platform android

# Submit to App Store
eas submit --platform ios

# Submit to Google Play
eas submit --platform android
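eas build:configure generates an eas.json at the project root. A sketch of what that file commonly looks like; the profile names and values here are illustrative rather than the exact generated output:
// eas.json (sketch; values illustrative)
{
  "cli": { "version": ">= 5.0.0" },
  "build": {
    "development": {
      "developmentClient": true,
      "distribution": "internal"
    },
    "preview": {
      "distribution": "internal"
    },
    "production": {
      "autoIncrement": true
    }
  },
  "submit": {
    "production": {}
  }
}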
App Store Guidelines Compliance
⚠️ App Store Requirements
- ✓ Implement content filtering for inappropriate AI responses
- ✓ Add clear disclosure that content is AI-generated
- ✓ Include privacy policy explaining data usage
- ✓ Implement age restrictions if necessary
- ✓ Never hardcode API keys in the app (see the SecureStore sketch below)
- ✓ Use secure HTTPS connections only
- ✓ Handle offline scenarios gracefully
- ✓ Provide clear error messages
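On the API-key point: AsyncStorage (which the settings store above uses) is not encrypted, so consider keeping the key in expo-secure-store, which is backed by the iOS Keychain and Android Keystore. A minimal sketch, assuming npx expo install expo-secure-store:
// Storing the API key in the platform keychain instead of AsyncStorage (sketch)
import * as SecureStore from 'expo-secure-store'

const API_KEY_STORAGE_KEY = 'ai-chat-api-key'

export async function saveApiKey(key: string) {
  // Persisted in the iOS Keychain / Android Keystore, never in plain storage
  await SecureStore.setItemAsync(API_KEY_STORAGE_KEY, key)
}

export async function loadApiKey(): Promise<string | null> {
  return SecureStore.getItemAsync(API_KEY_STORAGE_KEY)
}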
Privacy Policy Template
# Privacy Policy for AI Chat App

## Data Collection
- Messages sent to AI assistants
- Voice recordings (converted to text, not stored)
- Images selected for analysis
- Device information for analytics

## Data Usage
- Providing AI responses via ParrotRouter API
- Improving app performance
- Anonymous analytics

## Data Storage
- Conversations stored locally on device
- Optional cloud backup with encryption
- No sharing with third parties

## User Rights
- Export conversation history
- Delete all data
- Opt-out of analytics
Complete Example App
Here's how all the pieces come together in the main App component:
// App.tsx
import React, { useEffect } from 'react'
import { NavigationContainer, DarkTheme, DefaultTheme } from '@react-navigation/native'
import { createBottomTabNavigator } from '@react-navigation/bottom-tabs'
import { Ionicons } from '@expo/vector-icons'
import { StatusBar } from 'expo-status-bar'
import { useColorScheme } from 'react-native'
import NetInfo from '@react-native-community/netinfo'
import ChatScreen from './src/screens/ChatScreen'
import HistoryScreen from './src/screens/HistoryScreen'
import SettingsScreen from './src/screens/SettingsScreen'
import { useChatStore } from './src/store/chatStore'
import { useSettingsStore } from './src/store/settingsStore'
import { NotificationService } from './src/services/notifications'
import { StorageService } from './src/services/storage'
const Tab = createBottomTabNavigator()
const notificationService = new NotificationService()
const storageService = new StorageService()
export default function App() {
const colorScheme = useColorScheme()
const { setOnlineStatus, createNewConversation } = useChatStore()
const { theme } = useSettingsStore()
useEffect(() => {
// Initialize services
storageService.init()
notificationService.registerForPushNotifications()
// Create initial conversation
createNewConversation()
// Monitor network status
const unsubscribe = NetInfo.addEventListener(state => {
setOnlineStatus(state.isConnected ?? false)
})
// Set up notification listeners
const notificationListener = notificationService.addNotificationReceivedListener(
notification => {
console.log('Notification received:', notification)
}
)
const responseListener = notificationService.addNotificationResponseListener(
response => {
console.log('Notification response:', response)
// Navigate to relevant screen
}
)
return () => {
unsubscribe()
notificationListener.remove()
responseListener.remove()
}
}, [])
const isDark = theme === 'dark' || (theme === 'auto' && colorScheme === 'dark')
return (
<>
<StatusBar style={isDark ? 'light' : 'dark'} />
<NavigationContainer
theme={isDark ? DarkTheme : DefaultTheme}
>
<Tab.Navigator
screenOptions={({ route }) => ({
tabBarIcon: ({ focused, color, size }) => {
let iconName: keyof typeof Ionicons.glyphMap
if (route.name === 'Chat') {
iconName = focused ? 'chatbubbles' : 'chatbubbles-outline'
} else if (route.name === 'History') {
iconName = focused ? 'time' : 'time-outline'
} else {
iconName = focused ? 'settings' : 'settings-outline'
}
return <Ionicons name={iconName} size={size} color={color} />
},
tabBarActiveTintColor: '#0084ff',
tabBarInactiveTintColor: 'gray',
headerStyle: {
backgroundColor: isDark ? '#000' : '#fff',
},
headerTintColor: isDark ? '#fff' : '#000',
})}
>
<Tab.Screen name="Chat" component={ChatScreen} />
<Tab.Screen name="History" component={HistoryScreen} />
<Tab.Screen name="Settings" component={SettingsScreen} />
</Tab.Navigator>
</NavigationContainer>
</>
)
}
Ready to Build Mobile AI Apps?
Start creating powerful React Native applications with integrated LLM capabilities using ParrotRouter's unified API gateway.