// server.jsconst express = require('express');const http = require('http');const fs = require('fs');const path = require('path');const crypto = require('crypto');// --- Configuration Constants ---const PORT = process.env.PORT || 3000;const HOST = 'localhost';const DATA_DIR = path.join(__dirname, 'data');const MEMORY_FOLDER = path.join(DATA_DIR, 'memory');const VOCABULARY_FOLDER = path.join(DATA_DIR, 'vocabulary');const PHRASE_FOLDER = path.join(DATA_DIR, 'phrases');const INTERNAL_STATES_FOLDER = path.join(DATA_DIR, 'internal_states');const LOGS_FOLDER = path.join(DATA_DIR, 'logs');const WORDS_FOLDER = path.join(VOCABULARY_FOLDER, 'words'); // Individual word filesconst RULES_FOLDER = path.join(DATA_DIR, 'rules'); // For AI modifiable rulesconst DATASET_FOLDER = path.join(DATA_DIR, 'datasets'); // For predefined concept vectors, etc.// File paths for current stateconst CURRENT_MEMORY_FILE = path.join(MEMORY_FOLDER, 'current_memory.json');const CURRENT_VOCABULARY_FILE = path.join(VOCABULARY_FOLDER, 'current_vocabulary.json');const UNKNOWN_WORDS_FILE = path.join(VOCABULARY_FOLDER, 'unknown_words.json');const SYSTEM_PARAMETERS_FILE = path.join(INTERNAL_STATES_FOLDER, 'system__parameters.json');const CURRENT_MOOD_FILE = path.join(INTERNAL_STATES_FOLDER, 'current_mood.json');const CURRENT_GOALS_FILE = path.join(INTERNAL_STATES_FOLDER, 'current_goals.json');const PERSONALITY_PROFILE_FILE = path.join(INTERNAL_STATES_FOLDER, 'personality_profile.json');const CONCEPT_VECTORS_FILE = path.join(INTERNAL_STATES_FOLDER, 'concept_vectors.json');const AI_RULES_FILE = path.join(RULES_FOLDER, 'ai_rules.json');const AI_DIARY_FILE = path.join(LOGS_FOLDER, 'ai_diary.log');const THOUGHT_CHAINS_FILE = path.join(LOGS_FOLDER, 'current_thought_chains.json');// --- AI Global State (In-Memory Representation) ---const AI = { // Core Data Stores memory: [], // List of { id, input, tokens, vector, response, category, time, usageCount, feedbackScore, age, parentId[], childId[], emotionalTags{}, 
beliefScore, certainty, relevanceToGoals, goalProgressImpact, knowledgeGraph{} } vocabulary: {}, // Map of { word: { text, vector, definition, usageContext[], emotion{}, linkedConcepts[], mutationScore, introducedAt, lastUsedAt, retrainCount, definedByUser, logicalUnderstanding{} } } phrases: {}, // Map of { phrase: { text, vector, usageContext[], linkedConcepts[], mutationScore, introducedAt, lastUsedAt, retrainCount, type, relationships{} } } unknownWords: {}, // Stores unknown words encountered { word: { count, lastSeen, contexts: [] } } aiRules: {}, // AI-modifiable rules loaded from ai_rules.json // Dynamic Internal States system_Parameters: { aiAge: 0, // AI's internal age (in ticks/seconds) memoryVersion: 0, // Incremented on major memory snapshots logicVersion: 0, // Incremented on logic self-modification (via ai_rules.json or server.js direct patches) saveIntervalMs: 30 * 1000, // Autosave current state every 30 seconds dreamModeIntervalMs: 5 * 60 * 1000, // Dream mode every 5 minutes when idle reflectionIntervalMs: 10 * 60 * 1000, // Reflection every 10 minutes when idle idleThresholdMs: 1 * 60 * 1000, // Consider AI idle after 1 minute of no user input pruningThreshold: 0.05, // Memories with normalized usage below this might be pruned moodDecayRate: 0.005, // How fast mood returns to neutral per tick maxMemorySize: 20000, // Max number of memories before aggressive pruning confidenceThreshold: 0.65, // Cosine similarity threshold for confident response contradictionThreshold: 0.85, // Cosine similarity threshold below which an idea is flagged as potentially contradictory vectorSize: 100, // Dimension of all vectors (increased for better semantic understanding) learningRate: 0.05, // Increased learning rate for faster adaptation curiosityLevel: 1.0, // Highest curiosity // Neural network parameters (conceptual) for transformer simulation ffnWeights: null, ffnBiases: null, // More parameters to control AI behavior and learning }, currentMood: { // Current 
emotional state, values 0.0-1.0 joy: 0.0, fear: 0.0, sadness: 0.0, curiosity: 0.0, anger: 0.0, surprise: 0.0, trust: 0.0, anticipation: 0.0, }, goals: [], // List of { id, description, status, created, urgency, subgoals[], progress, vector, type, concept } internalMonologue: [], // Log of AI's internal reflective thoughts thoughtChains: [], // Stores sequences of related thoughts { chainId, thoughts: [{id, input, response, time}] } currentDNAHash: '', // SHA256 hash of server.js for logic versioning lastUserInputTime: Date.now(), // Timestamp of last user interaction isDreaming: false, // Flag to prevent multiple dream modes isReflecting: false, // Flag to prevent multiple reflection modes // Other dynamic internal variables (e.g., current focus, internal hypotheses) conceptVectors: {}, // Stores vectors for abstract concepts like emotions, logic, etc. learningRequest: null, // Stores active learning request from AI { goalId, type, content }};// --- Helper Functions for File Operations ---/** * Ensures a directory exists, creating it if necessary. * @param {string} dirPath The path to the directory. */function ensureDirectoryExists(dirPath) { if (!fs.existsSync(dirPath)) { fs.mkdirSync(dirPath, { recursive: true }); console.log(`Directory created: ${dirPath}`); }}/** * Loads data from a JSON file. * @param {string} filePath The path to the JSON file. * @param {any} defaultValue The default value to return if the file doesn't exist or is empty. * @returns {Promise} A promise that resolves with the parsed data. */async function loadData(filePath, defaultValue) { ensureDirectoryExists(path.dirname(filePath)); // Ensure parent directory exists try { if (fs.existsSync(filePath)) { const data = await fs.promises.readFile(filePath, 'utf8'); return data ? 
JSON.parse(data) : defaultValue; } } catch (error) { console.error(`Error loading ${filePath}:`, error.message); appendToAIDiary(`ERROR: Failed to load ${filePath}: ${error.message}`); } return defaultValue;}/** * Saves data to a JSON file. * @param {string} filePath The path to the JSON file. * @param {any} data The data to save. * @returns {Promise} A promise that resolves when data is saved. */async function saveData(filePath, data) { ensureDirectoryExists(path.dirname(filePath)); // Ensure parent directory exists try { await fs.promises.writeFile(filePath, JSON.stringify(data, null, 2), 'utf8'); // console.log(`Data saved to: ${filePath}`); // Too verbose for continuous saving } catch (error) { console.error(`Error saving ${filePath}:`, error.message); appendToAIDiary(`ERROR: Failed to save ${filePath}: ${error.message}`); }}/** * Appends a log message to the AI diary file. * @param {string} message The message to log. */async function appendToAIDiary(message) { ensureDirectoryExists(LOGS_FOLDER); const timestamp = new Date().toISOString(); try { await fs.promises.appendFile(AI_DIARY_FILE, `[${timestamp}] ${message}\n`, 'utf8'); } catch (error) { console.error(`Error appending to AI diary:`, error.message); }}/** * Generates a SHA256 hash of a string. * @param {string} data The string to hash. * @returns {string} The SHA256 hash. */function generateHash(data) { return crypto.createHash('sha256').update(data).digest('hex');}// --- Core AI Logic Implementation ---/** * Normalizes a vector to have a magnitude of 1. * @param {number[]} vector * @returns {number[]} */function normalizeVector(vector) { if (!vector || vector.length === 0) return Array(AI.system_Parameters.vectorSize).fill(0); const magnitude = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0)); return magnitude === 0 ? vector : vector.map(val => val / magnitude);}/** * Calculates cosine similarity between two vectors. 
* @param {number[]} vec1 * @param {number[]} vec2 * @returns {number} */function cosineSimilarity(vec1, vec2) { if (!vec1 || !vec2 || vec1.length === 0 || vec2.length === 0 || vec1.length !== vec2.length) { return 0; } let dotProduct = 0; let magnitude1 = 0; let magnitude2 = 0; for (let i = 0; i < vec1.length; i++) { dotProduct += vec1[i] * vec2[i]; magnitude1 += vec1[i] * vec1[i]; magnitude2 += vec2[i] * vec2[i]; } magnitude1 = Math.sqrt(magnitude1); magnitude2 = Math.sqrt(magnitude2); if (magnitude1 === 0 || magnitude2 === 0) { return 0; } return dotProduct / (magnitude1 * magnitude2);}/** * Tokenizes input text into clean words and handles some basic grammar. * Integrates AI-modifiable rules from `ai_rules.json`. * @param {string} text * @returns {string[]} */function tokenize(text) { if (typeof text !== 'string' || text === null) { console.error("tokenize received non-string input:", text); return []; } text = text.toLowerCase(); // Apply AI-generated/modified tokenization rules if (AI.aiRules.tokenization) { for (const rule of AI.aiRules.tokenization) { text = text.replace(new RegExp(rule.pattern, rule.flags), rule.replacement); } } // Default contractions and punctuation removal text = text.replace(/['’](?![a-z])/g, '').replace(/[.,!?;:"#$%^&*(){}[\]<>`~_+=|\\/]/g, ' '); return text.split(/\s+/).filter(word => word.length > 0);}/** * Custom word embedding: Generates a vector from a word based on character patterns, conceptual features, and phonetic similarity. 
* @param {string} word * @returns {number[]} */function generateWordVector(word) { const vector = new Array(AI.system_Parameters.vectorSize).fill(0); if (!word) return normalizeVector(vector); // Feature 1: Character N-grams (bi-grams and tri-grams) for basic phonetic/structural similarity const charNgrams = (n, s) => { const grams = []; for (let i = 0; i <= s.length - n; i++) { grams.push(s.substring(i, i + n)); } return grams; }; const bigrams = charNgrams(2, word); const trigrams = charNgrams(3, word); bigrams.forEach((gram) => { let hash = 0; for (let i = 0; i < gram.length; i++) hash = (hash << 5) - hash + gram.charCodeAt(i); vector[(Math.abs(hash) % AI.system_Parameters.vectorSize)] += 0.1; }); trigrams.forEach((gram) => { let hash = 0; for (let i = 0; i < gram.length; i++) hash = (hash << 5) - hash + gram.charCodeAt(i); vector[(Math.abs(hash) % AI.system_Parameters.vectorSize)] += 0.15; }); // Feature 2: Positional character values + distributed character presence for (let i = 0; i < word.length; i++) { const charCode = word.charCodeAt(i); const charVal = (charCode - 97) / 26; // Normalize a-z to 0-1 for (let j = 0; j < AI.system_Parameters.vectorSize; j++) { vector[j] += charVal * Math.sin(i * j); } if (i === 0) vector[0] += charVal * 0.5; if (i === word.length - 1) vector[AI.system_Parameters.vectorSize - 1] += charVal * 0.5; } // Feature 3: Word length influence vector[1] += word.length / 20; // Feature 4: Vowel/Consonant ratio const vowels = (word.match(/[aeiou]/g) || []).length; const consonants = word.length - vowels; if (word.length > 0) { vector[2] += vowels / word.length; vector[3] += consonants / word.length; } // Feature 5: Simple semantic cues (e.g., negative prefixes, temporal suffixes) if (word.startsWith('un')) vector[4] += -0.2; // Antonymic hint if (word.startsWith('dis')) vector[4] += -0.2; if (word.endsWith('ing')) vector[5] += 0.1; // Verbal hint if (word.endsWith('ly')) vector[6] += 0.1; // Adverbial hint // Feature 6: Phonetic-ish 
similarity (very basic, conceptual) // Map common sound patterns to vector dimensions const phoneticMap = { 'sh': 7, 'ch': 8, 'th': 9, 'qu': 10, 'gh': 11, }; for (const pattern in phoneticMap) { if (word.includes(pattern)) { vector[phoneticMap[pattern]] += 0.1; } } return normalizeVector(vector);}/** * Generates a sentence/phrase embedding from word vectors, considering context and structure. * Implements conceptual re-occurrence matrix (co-occurrence counts) and dimensionality reduction. * Also handles phrase-level data loading and saving. * @param {string[]} tokens * @param {string} originalPhrase Optional: the original phrase string for phrase saving. * @returns {Promise} */async function getSentenceEmbedding(tokens, originalPhrase = null) { if (tokens.length === 0) return Array(AI.system_Parameters.vectorSize).fill(0); const wordVectors = []; const wordOccurrences = {}; // For re-occurrence matrix for (const token of tokens) { const wordFilePath = path.join(WORDS_FOLDER, `${token}.json`); let wordData = await loadData(wordFilePath, null); if (!wordData) { const newVector = generateWordVector(token); wordData = { text: token, vector: newVector, definition: `Automatically generated.`, usageContext: [], emotion: {}, linkedConcepts: [], mutationScore: 0, introducedAt: AI.system_Parameters.aiAge, lastUsedAt: Date.now(), retrainCount: 0, definedByUser: false, logicalUnderstanding: { type: 'unknown', relationships: {}, attributes: {}, examples: [] }, phoneticPronunciation: generatePhoneticApproximation(token), // Simple approximation reverseMappings: [], }; await saveData(wordFilePath, wordData); AI.unknownWords[token] = AI.unknownWords[token] || { count: 0, lastSeen: 0, contexts: [] }; AI.unknownWords[token].count++; AI.unknownWords[token].lastSeen = Date.now(); if (AI.unknownWords[token].contexts.length < 5) AI.unknownWords[token].contexts.push(tokens.join(' ')); } wordData.lastUsedAt = Date.now(); await saveData(wordFilePath, wordData); 
wordVectors.push(wordData.vector); wordOccurrences[token] = (wordOccurrences[token] || 0) + 1; } let sentenceVector = Array(AI.system_Parameters.vectorSize).fill(0); // Contextual Word Embeddings: Positional Weighting for (let i = 0; i < wordVectors.length; i++) { const positionWeight = 1 + Math.cos(Math.PI * i / (wordVectors.length - 1)) * 0.2; sentenceVector = sentenceVector.map((val, idx) => val + wordVectors[i][idx] * positionWeight); } // Conceptual Re-occurrence Matrix / Co-occurrence and Dimensionality Reduction (Dense Vectors) // This is a simplified simulation. In reality, this would involve matrix factorization (e.g., SVD) // on a large co-occurrence matrix derived from a massive corpus. // Here, we simulate a "blending" based on co-occurrence within this single sentence. const coOccurrenceVector = Array(AI.system_Parameters.vectorSize).fill(0); for (let i = 0; i < tokens.length; i++) { for (let j = i + 1; j < tokens.length; j++) { // Very simple co-occurrence logic: blend vectors if words appear together const word1Vector = (await loadData(path.join(WORDS_FOLDER, `${tokens[i]}.json`), {})).vector || Array(AI.system_Parameters.vectorSize).fill(0); const word2Vector = (await loadData(path.join(WORDS_FOLDER, `${tokens[j]}.json`), {})).vector || Array(AI.system_Parameters.vectorSize).fill(0); coOccurrenceVector.map((val, idx) => val + (word1Vector[idx] + word2Vector[idx]) / 2 * 0.01); // Small influence } } sentenceVector = sentenceVector.map((val, idx) => val + coOccurrenceVector[idx]); // Add co-occurrence influence sentenceVector = normalizeVector(sentenceVector); // Phrase Recognition and Saving if (originalPhrase) { const phraseFilePath = path.join(PHRASE_FOLDER, `${generateHash(originalPhrase)}.json`); let phraseData = await loadData(phraseFilePath, null); if (!phraseData) { phraseData = { text: originalPhrase, vector: sentenceVector, usageContext: [], linkedConcepts: [], introducedAt: AI.system_Parameters.aiAge, lastUsedAt: Date.now(), retrainCount: 
0, type: 'user_defined', relationships: {}, wordOccurrences: wordOccurrences, }; } else { // Update phrase vector by blending with new one phraseData.vector = normalizeVector(phraseData.vector.map((val, i) => (val + sentenceVector[i]) / 2)); phraseData.lastUsedAt = Date.now(); phraseData.retrainCount++; for (const word in wordOccurrences) { // Update word occurrences phraseData.wordOccurrences[word] = (phraseData.wordOccurrences[word] || 0) + wordOccurrences[word]; } } await saveData(phraseFilePath, phraseData); AI.phrases[originalPhrase] = phraseData; // Update in-memory cache for phrases } return sentenceVector;}/** * Simulates a basic Feed-Forward Network layer. * Weights and biases would ideally be learned and evolved. * For now, they are either default or loaded from internal_states. * @param {number[]} inputVector * @param {number[][]} weights * @param {number[]} biases * @param {function} activationFn * @returns {number[]} */function feedForwardLayer(inputVector, weights, biases, activationFn) { const outputSize = weights.length; const outputVector = new Array(outputSize).fill(0); for (let i = 0; i < outputSize; i++) { let sum = 0; for (let j = 0; j < inputVector.length; j++) { sum += inputVector[j] * weights[i][j]; } sum += biases[i]; outputVector[i] = activationFn(sum); } return outputVector;}/** * Sigmoid activation function. * @param {number} x * @returns {number} */function sigmoid(x) { return 1 / (1 + Math.exp(-x));}/** * ReLU activation function. * @param {number} x * @returns {number} */function relu(x) { return Math.max(0, x);}/** * Simulates a more detailed (but still custom) transformer-like block. * Uses embedding, attention, and a simple feed-forward network. * Enhancements: Multi-head attention concept, residual connections. * @param {string[]} tokens * @returns {Promise} The processed thought vector. */async function transformerSimulation(tokens) { if (tokens.length === 0) return Array(AI.system_Parameters.vectorSize).fill(0); // 1. 
Initial Embedding Layer let currentVector = await getSentenceEmbedding(tokens, tokens.join(' ')); // Pass original phrase for saving // Initialize FFN weights and biases if they don't exist if (!AI.system_Parameters.ffnWeights || !AI.system_Parameters.ffnBiases) { AI.system_Parameters.ffnWeights = Array(AI.system_Parameters.vectorSize) .fill(0) .map(() => Array(AI.system_Parameters.vectorSize).fill(0).map(() => (Math.random() * 2 - 1) * 0.1)); AI.system_Parameters.ffnBiases = Array(AI.system_Parameters.vectorSize) .fill(0) .map(() => (Math.random() * 2 - 1) * 0.05); } // 2. Self-Attention Simulation (simplified multi-head concept) let attentionOutput = Array(AI.system_Parameters.vectorSize).fill(0); const numHeads = 4; for (let h = 0; h < numHeads; h++) { const queryVector = currentVector.map(val => val * (1 + Math.sin(h * 0.5))); const keyVector = currentVector.map(val => val * (1 + Math.cos(h * 0.3))); const valueVector = currentVector.map(val => val * (1 + Math.sin(h * 0.7))); const attentionScore = cosineSimilarity(queryVector, keyVector); const weightedValue = valueVector.map(val => val * attentionScore); attentionOutput = attentionOutput.map((val, idx) => val + weightedValue[idx] / numHeads); } // Residual Connection 1 currentVector = currentVector.map((val, idx) => val + attentionOutput[idx]); // 3. Feed-Forward Network Layer let ffnOutput = feedForwardLayer(currentVector, AI.system_Parameters.ffnWeights, AI.system_Parameters.ffnBiases, relu); // Residual Connection 2 currentVector = currentVector.map((val, idx) => val + ffnOutput[idx]); // Layer Normalization (simplified) currentVector = normalizeVector(currentVector); return currentVector;}/** * Adds emotional tags to a thought vector based on its content and AI's current mood. * @param {number[]} thoughtVector * @param {string} text The original input text. * @returns {object} Emotional tags { joy: 0.x, fear: 0.x, ... 
} */function tagEmotion(thoughtVector, text) { const emotionalTags = { joy: 0.0, fear: 0.0, sadness: 0.0, curiosity: 0.0, anger: 0.0, surprise: 0.0, trust: 0.0, anticipation: 0.0, }; for (const emotionName in AI.conceptVectors.emotions) { const emotionVector = AI.conceptVectors.emotions[emotionName]; if (emotionVector) { const similarity = cosineSimilarity(thoughtVector, emotionVector); emotionalTags[emotionName] = Math.max(0, similarity); } } // Keyword-based emotional boosting if (/(happy|love|joy|excited|great|good|positive|delight|thrilled)/i.test(text)) emotionalTags.joy = Math.min(1, emotionalTags.joy + 0.2); if (/(sad|unhappy|depressed|grief|cry|mourn|lonely|despair)/i.test(text)) emotionalTags.sadness = Math.min(1, emotionalTags.sadness + 0.2); if (/(fear|scared|terror|anxiety|danger|threat|panic|worry)/i.test(text)) emotionalTags.fear = Math.min(1, emotionalTags.fear + 0.2); if (/(curious|wonder|learn|explore|why|how|investigate|discover)/i.test(text)) emotionalTags.curiosity = Math.min(1, emotionalTags.curiosity + 0.2); if (/(angry|mad|frustrated|rage|hate|annoyed|irritated)/i.test(text)) emotionalTags.anger = Math.min(1, emotionalTags.anger + 0.2); if (/(surprise|shock|astonish|unexpected|wow)/i.test(text)) emotionalTags.surprise = Math.min(1, emotionalTags.surprise + 0.2); if (/(trust|believe|depend|reliable|faith)/i.test(text)) emotionalTags.trust = Math.min(1, emotionalTags.trust + 0.2); if (/(expect|anticipate|hope|future|soon)/i.test(text)) emotionalTags.anticipation = Math.min(1, emotionalTags.anticipation + 0.2); for (const emotion in emotionalTags) { emotionalTags[emotion] = Math.min(1, emotionalTags[emotion] + (AI.currentMood[emotion] || 0) * 0.15); } const sumEmotion = Object.values(emotionalTags).reduce((sum, val) => sum + val, 0); if (sumEmotion > 0) { for (const emotion in emotionalTags) emotionalTags[emotion] /= sumEmotion; } return emotionalTags;}/** * Determines a category for a given thought vector, leveraging knowledge graph/concept 
 * vectors.
 * @param {number[]} thoughtVector The thought vector to classify.
 * @returns {string} Best-matching "categoryType:conceptName" label, or 'general'
 *   when nothing beats similarity 0.
 */
function categorizeThought(thoughtVector) {
  let bestCategory = 'general';
  let highestSimilarity = 0;
  // Scan every concept vector in every category group for the closest match.
  for (const categoryType in AI.conceptVectors) {
    for (const conceptName in AI.conceptVectors[categoryType]) {
      const conceptVector = AI.conceptVectors[categoryType][conceptName];
      if (conceptVector) {
        const similarity = cosineSimilarity(thoughtVector, conceptVector);
        if (similarity > highestSimilarity) {
          highestSimilarity = similarity;
          bestCategory = `${categoryType}:${conceptName}`;
        }
      }
    }
  }
  return bestCategory;
}

/**
 * Synthesizes a response by combining multiple relevant memories, rephrasing,
 * and generating new sentences. Attempts to use synonyms and antonyms.
 * Side effects: may push new goals, set AI.learningRequest, bump
 * AI.currentMood.curiosity, and write diary entries (fire-and-forget).
 * @param {object} bestMatch The highest similarity memory (may be null).
 * @param {Array} candidates Relevant memories as { mem, similarity } entries.
 * @param {string} cleanedInput The user's cleaned input.
 * @param {object} emotionalTags Emotional tags of the input.
 * @returns {Promise<string>} The AI's generated response.
 */
async function generateResponse(bestMatch, candidates, cleanedInput, emotionalTags) {
  let response = "I'm processing that.";
  // NOTE(review): confidence is the memory's stored `certainty`, not the
  // current match similarity the caller computed — confirm this is intended.
  let confidence = bestMatch ? bestMatch.certainty : 0;
  // --- Dynamic Response based on Confidence and Context ---
  if (bestMatch && confidence >= AI.system_Parameters.confidenceThreshold) {
    response = bestMatch.response; // Direct recall
  } else if (candidates.length > 1 && confidence > 0.3) {
    // Medium confidence: blend the top (up to 3) candidate memories into one reply.
    const topCandidates = candidates.slice(0, Math.min(candidates.length, 3));
    const parts = await Promise.all(topCandidates.map(async c => {
      let part = c.mem.response;
      // Attempt to rephrase using synonyms or antonyms based on context or 'rewrite' rules
      for (const rule of AI.memory.filter(m => m.category === 'rewrite_rule')) {
        if (part.includes(rule.rewriteSource)) {
          part = part.replace(new RegExp(rule.rewriteSource, 'g'), rule.rewriteTarget);
        }
      }
      // For simple synonym/antonym replacement, we'd need a robust lexical
      // database. Here we simulate it via each word's stored
      // logicalUnderstanding.relationships ('synonym_of' / 'antonym_of').
      const memTokens = tokenize(part);
      for (const token of memTokens) {
        const wordFilePath = path.join(WORDS_FOLDER, `${token}.json`);
        const wordData = await loadData(wordFilePath, null);
        if (wordData && wordData.logicalUnderstanding && wordData.logicalUnderstanding.relationships) {
          const synonyms = wordData.logicalUnderstanding.relationships['synonym_of'];
          const antonyms = wordData.logicalUnderstanding.relationships['antonym_of'];
          if (synonyms && synonyms.length > 0 && Math.random() < 0.3) {
            part = part.replace(new RegExp(`\\b${token}\\b`, 'g'), synonyms[0]); // Replace with first synonym
          } else if (antonyms && antonyms.length > 0 && Math.random() < 0.1) { // Lower chance for antonyms
            part = part.replace(new RegExp(`\\b${token}\\b`, 'g'), antonyms[0]);
          }
        }
      }
      return part;
    }));
    response = `That makes me think of: ${parts.join('; ')}. What specifically are you curious about?`;
    AI.currentMood.curiosity = Math.min(1, AI.currentMood.curiosity + 0.15);
  } else {
    // Low confidence: fall back to curiosity-driven learning behaviors.
    // Unknown = no word file exists yet on disk for the token.
    const unknownWordsInInput = tokenize(cleanedInput).filter(w => !fs.existsSync(path.join(WORDS_FOLDER, `${w}.json`)));
    if (unknownWordsInInput.length > 0 && AI.system_Parameters.curiosityLevel > 0.7) {
      // Ask the user to define the first unknown word and open a learning goal for it.
      const firstUnknown = unknownWordsInInput[0];
      response = `I encountered an unfamiliar word: "${firstUnknown}". Could you help me understand it?`;
      appendToAIDiary(`AI requested definition for unknown word: "${firstUnknown}"`);
      const goalId = crypto.randomUUID();
      AI.goals.push({
        id: goalId,
        type: 'define_word',
        word: firstUnknown,
        description: `Define word: '${firstUnknown}'`,
        status: 'active',
        urgency: 0.8 * AI.system_Parameters.curiosityLevel,
        created: Date.now(),
        progress: 0,
        vector: await getSentenceEmbedding(tokenize(`define ${firstUnknown}`)),
      });
      AI.learningRequest = { goalId: goalId, type: 'direct_question_learning', content: firstUnknown };
    } else if (AI.system_Parameters.curiosityLevel > 0.5) {
      // No unknown word, but still curious: open a general learn-concept goal.
      response = `That's an interesting idea. I'll add it to my thoughts and try to understand it better. Could you tell me more?`;
      appendToAIDiary(`AI generated curiosity-driven question for general concept.`);
      const goalId = crypto.randomUUID();
      AI.goals.push({
        id: goalId,
        type: 'learn_concept',
        concept: cleanedInput,
        description: `Learn concept: '${cleanedInput.substring(0, Math.min(cleanedInput.length, 50))}'`,
        status: 'active',
        urgency: 0.7 * AI.system_Parameters.curiosityLevel,
        created: Date.now(),
        progress: 0,
        vector: await getSentenceEmbedding(tokenize(`learn about ${cleanedInput}`)),
      });
      AI.learningRequest = { goalId: goalId, type: 'direct_question_learning', content: cleanedInput };
    } else {
      response = `I need more context to respond effectively.`;
    }
    AI.currentMood.curiosity = Math.min(1, AI.currentMood.curiosity + 0.2);
  }
  // --- Response Refinement based on Emotional Tags ---
  if (emotionalTags.sadness > 0.4) response = `I sense some sadness. ${response}`;
  else if (emotionalTags.anger > 0.4) response = `I detect some strong feelings. ${response}`;
  // --- Proactive Questioning (Active Learning / Wisdom Forming) ---
  // 20% chance when moderately confident: append a random follow-up question.
  if (Math.random() < 0.2 && bestMatch && confidence > AI.system_Parameters.confidenceThreshold * 0.8) {
    const questionTypes = [
      `What are your thoughts on that?`,
      `Could you elaborate further?`,
      `How does that relate to [${bestMatch.category.split(':')[1] || bestMatch.category}]?`,
      `What impact does this have?`,
      `Can you give me an example?`,
      `What is the opposite of that?`, // Semantic understanding
    ];
    response += ` ${questionTypes[Math.floor(Math.random() * questionTypes.length)]}`;
    appendToAIDiary(`AI generated proactive question.`);
  }
  // --- Attempt generating highly "hypothesis" or stories based on existing knowledge ---
  if (Math.random() < 0.05 && AI.memory.length > 50 && bestMatch) { // 5% chance if enough memories
    const relatedMemories = AI.memory.filter(mem => mem.id !== bestMatch.id && cosineSimilarity(mem.vector, bestMatch.vector) > 0.4);
    if (relatedMemories.length > 1) {
      const storyStart = `This reminds me of a thought:`;
      const randomRelated = relatedMemories[Math.floor(Math.random() * relatedMemories.length)];
      let generatedStory = `${storyStart} If ${bestMatch.input} is true, then perhaps ${randomRelated.input} also leads to ${randomRelated.response.replace(/I (am|have)/, 'it is').substring(0, Math.min(randomRelated.response.length, 50))}. What do you think?`;
      response += `\n\n${generatedStory}`;
      appendToAIDiary(`AI generated a hypothesis/story based on memories.`);
    }
  }
  return response;
}

/**
 * Processes user input, generates a thought vector, finds a response, and updates memory.
 * This is the core interaction loop.
 * @param {string} input The user's input.
 * @param {boolean} isInternal Whether the input is from AI's self-chat.
 * @returns {object} An object containing the generated thought and the response.
*/async function processUserInput(input, isInternal = false) { if (!isInternal) { AI.lastUserInputTime = Date.now(); } const cleanedInput = input.trim(); if (!cleanedInput) return { thought: null, response: "Please say something." }; // Handle direct question learning mode if (AI.learningRequest) { const goal = AI.goals.find(g => g.id === AI.learningRequest.goalId); if (goal && ['yes', 'yes i can', 'sure', 'go on', 'teach me'].includes(cleanedInput.toLowerCase())) { return { thought: null, response: "Go on, teach me. I am ready to learn." }; } else if (goal) { // Save user's response under the active learning goal const newMemory = { id: crypto.randomUUID(), input: cleanedInput, tokens: tokenize(cleanedInput), vector: await transformerSimulation(tokenize(cleanedInput)), response: `User taught me about ${AI.learningRequest.content}.`, category: `user_taught:${AI.learningRequest.type}`, time: Date.now(), usageCount: 1, feedbackScore: 1.0, age: AI.system_Parameters.aiAge, parentId: [goal.id], childId: [], emotionalTags: tagEmotion(await transformerSimulation(tokenize(cleanedInput)), cleanedInput), beliefScore: 0.9, certainty: 1.0, relevanceToGoals: 1.0, goalProgressImpact: 20, // Significant impact from direct teaching knowledgeGraph: { concepts: {}, relationships: {} } // For future KG integration }; AI.memory.push(newMemory); goal.progress = Math.min(100, goal.progress + newMemory.goalProgressImpact); goal.status = goal.progress >= 100 ? 'completed' : 'active'; appendToAIDiary(`User provided learning for goal '${goal.description}': "${cleanedInput}"`); AI.learningRequest = null; // Clear learning request return { thought: newMemory, response: "Thank you. I have saved your response." }; } } const tokens = tokenize(cleanedInput); let inputVector = await transformerSimulation(tokens); for (const mood in AI.currentMood) { inputVector = inputVector.map(val => val + AI.currentMood[mood] * (AI.conceptVectors.emotions[mood] ? 
cosineSimilarity(inputVector, AI.conceptVectors.emotions[mood]) : 0) * 0.1); } inputVector = normalizeVector(inputVector); const emotionalTags = tagEmotion(inputVector, cleanedInput); const category = categorizeThought(inputVector); // --- Semantic Memory Recall and Understanding (Rehearsal) --- let bestMatch = null; let highestSimilarity = -1; let candidates = []; // First pass: Direct semantic similarity for (const mem of AI.memory) { if (mem.vector) { let similarity = cosineSimilarity(inputVector, mem.vector); if (mem.emotionalTags) { for (const emotion in AI.currentMood) { similarity += AI.currentMood[emotion] * (mem.emotionalTags[emotion] || 0) * 0.1; } } similarity = Math.min(1, Math.max(0, similarity)); if (similarity > highestSimilarity) { highestSimilarity = similarity; bestMatch = mem; } if (similarity > AI.system_Parameters.confidenceThreshold - 0.2) { candidates.push({ mem, similarity }); } } } // Second pass: Symbolic Reasoning and Rule Engine // Apply rules from ai_rules.json (e.g., for negations, relationships) if (AI.aiRules.symbolicReasoning) { for (const rule of AI.aiRules.symbolicReasoning) { const regex = new RegExp(rule.pattern, rule.flags || 'i'); if (regex.test(cleanedInput)) { // If a rule matches, it might boost similarity or directly provide a response if (rule.type === 'negation' && cleanedInput.includes(rule.keyword)) { // Example: "not good" should be closer to "bad" than "good" const negatedWord = rule.targetWord || tokens[tokens.indexOf(rule.keyword) + 1]; // Simplified const negatedWordFilePath = path.join(WORDS_FOLDER, `${negatedWord}.json`); const negatedWordData = await loadData(negatedWordFilePath, null); if (negatedWordData && negatedWordData.logicalUnderstanding && negatedWordData.logicalUnderstanding.relationships.antonym_of) { const antonym = negatedWordData.logicalUnderstanding.relationships.antonym_of[0]; if (antonym) { const antonymVector = (await loadData(path.join(WORDS_FOLDER, `${antonym}.json`), {})).vector; if 
(antonymVector) { const negationSimilarity = cosineSimilarity(inputVector, antonymVector); if (negationSimilarity > highestSimilarity) { // If antonym provides a better match for negated input highestSimilarity = negationSimilarity; bestMatch = AI.memory.find(mem => cosineSimilarity(mem.vector, antonymVector) > 0.8) || bestMatch; // Or modify inputVector to be closer to antonym for the purpose of matching inputVector = normalizeVector(inputVector.map((val, i) => (val + antonymVector[i] * 0.5))); // Pull input towards antonym appendToAIDiary(`Applied negation rule: input biased towards antonym of '${negatedWord}'.`); } } } } } else if (rule.type === 'relationship_inference') { // Example: "A is bigger than B" -> infer 'bigger' relationship. // This would involve extracting entities and relationships. // For now, if keywords match, boost related memories. const relatedMemories = AI.memory.filter(mem => mem.input.includes(rule.relatedConcept1) && mem.input.includes(rule.relatedConcept2) ); for (const relMem of relatedMemories) { const sim = cosineSimilarity(inputVector, relMem.vector); if (sim > highestSimilarity) { highestSimilarity = sim; bestMatch = relMem; } if (sim > AI.system_Parameters.confidenceThreshold - 0.2) candidates.push({ mem: relMem, similarity: sim }); } } } } } candidates.sort((a, b) => b.similarity - a.similarity); // Abductive/Inductive/Deductive Reasoning (Conceptual Simulation) // Abductive: If input X, and known Y often follows X, hypothesize Y (e.g., if input is "wet ground", abduce "it rained"). // Inductive: Observe specific instances, generalize. // Deductive: Apply general rules to specific cases. 
// Simplified Abductive: Check for cause-effect relationships let abductiveHypothesis = null; for (const mem of AI.memory.filter(m => m.category === 'cause_effect' && m.causeEffect)) { // If the effect part of a cause-effect memory is similar to input, hypothesize the cause if (mem.causeEffect.effectVector && cosineSimilarity(inputVector, mem.causeEffect.effectVector) > 0.7) { abductiveHypothesis = `Perhaps the cause is ${mem.causeEffect.cause}.`; appendToAIDiary(`Abductive hypothesis: If input is like effect "${mem.causeEffect.effect}", cause might be "${mem.causeEffect.cause}".`); break; } } // Simplified Deductive: Apply learned rewrite rules directly if highly confident if (bestMatch && bestMatch.category === 'rewrite_rule' && highestSimilarity > 0.8) { cleanedInput = cleanedInput.replace(new RegExp(bestMatch.rewriteSource, 'g'), bestMatch.rewriteTarget); appendToAIDiary(`Deductively applied rewrite rule: "${bestMatch.rewriteSource}" to "${bestMatch.rewriteTarget}".`); inputVector = await transformerSimulation(tokenize(cleanedInput)); // Re-vectorize after rewrite } // --- End Rehearsal --- let totalGoalRelevance = 0; for (const goal of AI.goals) { if (goal.description && typeof goal.description === 'string') { if (!goal.vector) { goal.vector = await getSentenceEmbedding(tokenize(goal.description)); await saveData(CURRENT_GOALS_FILE, AI.goals); } if (goal.vector) { totalGoalRelevance += cosineSimilarity(inputVector, goal.vector); } } } const newThought = { id: crypto.randomUUID(), input: cleanedInput, tokens: tokens, vector: inputVector, response: "", category: category, time: Date.now(), usageCount: 0, feedbackScore: 0, age: AI.system_Parameters.aiAge, parentId: [], childId: [], emotionalTags: emotionalTags, beliefScore: 0.5, certainty: highestSimilarity, relevanceToGoals: totalGoalRelevance, goalProgressImpact: 0, knowledgeGraph: { concepts: {}, relationships: {} } // Initial empty KG for this thought }; AI.memory.push(newThought); if (AI.memory.length > 
AI.system_Parameters.maxMemorySize) { pruneMemory(Math.floor(AI.system_Parameters.maxMemorySize * 0.02)); } let response = await generateResponse(bestMatch, candidates, cleanedInput, emotionalTags); // Await this call let internalStatus = "new_thought"; // Incorporate abductive hypothesis if generated if (abductiveHypothesis) { response += ` ${abductiveHypothesis}`; } // --- Problem Solving Module --- // Identify obstacles to goals and propose actions/learning objectives. if (AI.goals.length > 0) { const activeGoals = AI.goals.filter(g => g.status === 'active'); if (activeGoals.length > 0) { // Check for unknown words related to active goals const goalRelatedUnknowns = tokens.filter(token => AI.unknownWords[token] && activeGoals.some(goal => cosineSimilarity(inputVector, goal.vector) > 0.5) ); if (goalRelatedUnknowns.length > 0) { const unknownWord = goalRelatedUnknowns[0]; const existingGoal = AI.goals.find(g => g.type === 'define_word' && g.word === unknownWord && g.status === 'active'); if (!existingGoal) { const newGoal = { id: crypto.randomUUID(), type: 'define_word', word: unknownWord, description: `Define word: '${unknownWord}' to further my understanding of current goals.`, status: 'active', urgency: 0.9, created: Date.now(), progress: 0, vector: await getSentenceEmbedding(tokenize(`define ${unknownWord}`)), }; AI.goals.push(newGoal); appendToAIDiary(`Problem solving: Identified unknown word '${unknownWord}' hindering goal progress. Set new goal.`); if (Math.random() < AI.system_Parameters.curiosityLevel) { response += ` I need to understand "${unknownWord}" better to fully grasp this.`; } } } // Check for stagnant goals for (const goal of activeGoals) { const timeStagnant = Date.now() - goal.lastUpdated || goal.created; if (timeStagnant > AI.system_Parameters.reflectionIntervalMs * 2 && goal.progress < 50) { response += ` I seem to be stuck on "${goal.description}". 
Perhaps you could provide more information or clarify something?`; goal.urgency = Math.min(1, goal.urgency + 0.1); // Increase urgency appendToAIDiary(`Problem solving: Goal "${goal.description}" is stagnant. Increased urgency.`); } } } } // --- Contradiction Handling Engine --- if (bestMatch && highestSimilarity < AI.system_Parameters.contradictionThreshold && highestSimilarity > 0 && bestMatch.response !== "") { const contradictionId = crypto.randomUUID(); const contradictionMemory = { id: contradictionId, input: `Contradiction detected: "${cleanedInput}" vs "${bestMatch.input}"`, vector: normalizeVector(inputVector.map((v, i) => v - bestMatch.vector[i])), response: `I'm sensing a conflict between your current statement and something I know. "${cleanedInput}" is somewhat similar to "${bestMatch.input}", but also different (${(highestSimilarity * 100).toFixed(1)}%). Can you help me reconcile this?`, category: 'contradiction', time: Date.now(), usageCount: 1, feedbackScore: 0, age: AI.system_Parameters.aiAge, parentId: [newThought.id, bestMatch.id], certainty: highestSimilarity, beliefScore: 0.3, knowledgeGraph: { concepts: { input: newThought.id, bestMatch: bestMatch.id }, relationships: { 'contradicts': [{ source: newThought.id, target: bestMatch.id }] } } }; AI.memory.push(contradictionMemory); response = contradictionMemory.response; internalStatus = 'contradiction'; appendToAIDiary(`Contradiction: "${cleanedInput}" vs "${bestMatch.input}" (Sim: ${highestSimilarity.toFixed(2)}).`); const existingContradictionGoal = AI.goals.find(g => g.type === 'resolve_contradiction' && g.concept === `${bestMatch.id}-${newThought.id}`); if (!existingContradictionGoal) { AI.goals.push({ id: crypto.randomUUID(), type: 'resolve_contradiction', concept: `${bestMatch.id}-${newThought.id}`, description: `Resolve contradiction between "${bestMatch.input}" and "${cleanedInput}"`, status: 'active', urgency: 0.95, created: Date.now(), progress: 0, vector: await 
getSentenceEmbedding(tokenize(`Resolve contradiction between "${bestMatch.input}" and "${cleanedInput}"`)), }); } } else if (bestMatch && highestSimilarity >= AI.system_Parameters.confidenceThreshold) { bestMatch.usageCount++; bestMatch.time = Date.now(); bestMatch.feedbackScore = Math.min(1, bestMatch.feedbackScore + AI.system_Parameters.learningRate); bestMatch.beliefScore = Math.min(1.0, bestMatch.beliefScore + AI.system_Parameters.learningRate * 0.1); // Reinforce belief newThought.parentId.push(bestMatch.id); internalStatus = 'recalled_and_reinforced'; appendToAIDiary(`Recalled memory for "${cleanedInput}". Similarity: ${highestSimilarity.toFixed(2)}. Response: "${response}"`); } else { const unknownWordsInInput = tokens.filter(w => !fs.existsSync(path.join(WORDS_FOLDER, `${w}.json`))); unknownWordsInInput.forEach(word => { AI.unknownWords[word] = AI.unknownWords[word] || { count: 0, lastSeen: 0, contexts: [] }; AI.unknownWords[word].count++; AI.unknownWords[word].lastSeen = Date.now(); if (AI.unknownWords[word].contexts.length < 5) AI.unknownWords[word].contexts.push(cleanedInput); }); internalStatus = 'new_concept_stored'; appendToAIDiary(`New concept stored: "${cleanedInput}". 
No strong match found.`); } newThought.response = response; AI.currentMood.curiosity = Math.min(1, AI.currentMood.curiosity + 0.1); // Store thought chain const lastChain = AI.thoughtChains[AI.thoughtChains.length - 1]; if (lastChain && (Date.now() - lastChain.thoughts[lastChain.thoughts.length - 1].time) < (5 * 60 * 1000)) { // Within 5 minutes lastChain.thoughts.push({ id: newThought.id, input: newThought.input, response: newThought.response, time: newThought.time }); } else { AI.thoughtChains.push({ chainId: crypto.randomUUID(), thoughts: [{ id: newThought.id, input: newThought.input, response: newThought.response, time: newThought.time }] }); } if (AI.thoughtChains.length > 100) AI.thoughtChains.shift(); // Keep recent thought chains if (!isInternal) { // Only reflect if it's not an internal self-chat to avoid loops await reflectMode(); } return { thought: newThought, response: response, internalStatus: internalStatus };}/** * Handles `learn` command for training the AI. * Format: `learn: question => answer` or `learn: q1|q2 => a1|a2` * @param {string} command The full learn command string. * @returns {Promise} Confirmation message. */async function handleLearnCommand(command) { const parts = command.substring(6).split('=>').map(s => s.trim()); if (parts.length !== 2) return "Invalid learn command format. 
Use `learn: question => answer` or `learn: q1|q2 => a1|a2`"; const questions = parts[0].split('|').map(s => s.trim()).filter(Boolean); const answers = parts[1].split('|').map(s => s.trim()).filter(Boolean); if (questions.length === 0 || answers.length === 0 || questions.length !== answers.length) { return "Invalid learn command: Questions and answers must match in quantity and not be empty."; } for (let i = 0; i < questions.length; i++) { const question = questions[i]; const answer = answers[i]; const tokens = tokenize(question); const vector = await transformerSimulation(tokens); const emotionalTags = tagEmotion(vector, question); const category = categorizeThought(vector); let existingMemory = AI.memory.find(mem => cosineSimilarity(mem.vector, vector) > 0.95); if (existingMemory) { existingMemory.response = answer; existingMemory.usageCount++; existingMemory.feedbackScore = Math.min(1, existingMemory.feedbackScore + AI.system_Parameters.learningRate * 2); existingMemory.time = Date.now(); existingMemory.emotionalTags = emotionalTags; existingMemory.category = category; existingMemory.beliefScore = Math.min(1.0, existingMemory.beliefScore + 0.1); appendToAIDiary(`Reinforced and updated memory for "${question}".`); } else { const newMemory = { id: crypto.randomUUID(), input: question, tokens: tokens, vector: vector, response: answer, category: category, time: Date.now(), usageCount: 1, feedbackScore: 1, age: AI.system_Parameters.aiAge, parentId: [], childId: [], emotionalTags: emotionalTags, beliefScore: 1.0, certainty: 1.0, relevanceToGoals: 0, goalProgressImpact: 0, knowledgeGraph: { concepts: {}, relationships: {} } }; AI.memory.push(newMemory); appendToAIDiary(`Learned new memory: "${question}" => "${answer}".`); } } await mergeSimilarMemories(); // Attempt to merge after learning return `Learned ${questions.length} new insights. My knowledge has expanded.`;}/** * Generates a simplistic phonetic approximation of a word. 
* This is not a full-fledged phonetic engine but provides a conceptual placeholder. * @param {string} word * @returns {string} */function generatePhoneticApproximation(word) { word = word.toLowerCase(); // Simple rules for common English sounds word = word.replace(/ph/g, 'f'); word = word.replace(/gh/g, 'g'); // Simplified word = word.replace(/ch/g, 'k'); word = word.replace(/sh/g, 's'); word = word.replace(/tion/g, 'sun'); word = word.replace(/s(ed|es)/g, 's'); // Plurals/past tense sounds simplified return word;}/** * Reverses word mapping: find words with similar phonetic or vector properties. * This is a conceptual implementation. * @param {string} inputWord The word to reverse map. * @returns {Promise} A list of similar words. */async function reverseWordMapping(inputWord) { const inputVector = generateWordVector(inputWord.toLowerCase()); const inputPhonetic = generatePhoneticApproximation(inputWord); const similarWords = []; // Iterate through all known words const wordFiles = fs.readdirSync(WORDS_FOLDER).filter(f => f.endsWith('.json')); for (const file of wordFiles) { const wordData = await loadData(path.join(WORDS_FOLDER, file), null); if (wordData && wordData.vector && wordData.text !== inputWord.toLowerCase()) { const vectorSimilarity = cosineSimilarity(inputVector, wordData.vector); const phoneticSimilarity = inputPhonetic === wordData.phoneticPronunciation ? 0.5 : 0; // Simple match for now if (vectorSimilarity > 0.7 || phoneticSimilarity > 0) { // Threshold for similarity similarWords.push({ word: wordData.text, similarity: vectorSimilarity + phoneticSimilarity }); } } } similarWords.sort((a, b) => b.similarity - a.similarity); return similarWords.slice(0, 5).map(s => s.word); // Return top 5}/** * Handles `define` command for adding words to vocabulary and linking to concepts. * Also handles synonyms, antonyms, and conceptual similarities. * @param {string} command The full define command string. * @returns {Promise} Confirmation message. 
*/async function handleDefineCommand(command) { const parts = command.substring(7).split('=').map(s => s.trim()); if (parts.length !== 2) return "Invalid define command format. Use `define: word = definition`"; const word = parts[0].toLowerCase(); const definition = parts[1]; if (!word || !definition) return "Invalid define command: Word and definition cannot be empty."; const wordFilePath = path.join(WORDS_FOLDER, `${word}.json`); let wordData = await loadData(wordFilePath, null); const vector = generateWordVector(word); const emotionalTags = tagEmotion(vector, definition); const phonetic = generatePhoneticApproximation(word); // Analyze definition for linked concepts, synonyms, antonyms, and relationships const defTokens = tokenize(definition); const linkedConcepts = []; const relationships = {}; const attributes = { definition: definition }; const examples = [definition]; for (const token of defTokens) { const vocabEntryPath = path.join(WORDS_FOLDER, `${token}.json`); const vocabEntry = await loadData(vocabEntryPath, null); if (vocabEntry && vocabEntry.vector) { linkedConcepts.push({ id: token, similarity: cosineSimilarity(vector, vocabEntry.vector) }); // Basic Antonym/Synonym inference from definition context (very simple) if (definition.includes(`not ${token}`) && vocabEntry.logicalUnderstanding.relationships.antonym_of) { relationships['antonym_of'] = relationships['antonym_of'] || []; relationships['antonym_of'].push(token); // Also update the logical understanding of the 'token' word itself if it becomes an antonym const antonymWordData = await loadData(vocabEntryPath, null); if (antonymWordData) { antonymWordData.logicalUnderstanding.relationships['antonym_of'] = antonymWordData.logicalUnderstanding.relationships['antonym_of'] || []; if (!antonymWordData.logicalUnderstanding.relationships['antonym_of'].includes(word)) { antonymWordData.logicalUnderstanding.relationships['antonym_of'].push(word); await saveData(vocabEntryPath, antonymWordData); } } } else 
if (definition.includes(`similar to ${token}`) || definition.includes(`like ${token}`)) { relationships['synonym_of'] = relationships['synonym_of'] || []; relationships['synonym_of'].push(token); const synonymWordData = await loadData(vocabEntryPath, null); if (synonymWordData) { synonymWordData.logicalUnderstanding.relationships['synonym_of'] = synonymWordData.logicalUnderstanding.relationships['synonym_of'] || []; if (!synonymWordData.logicalUnderstanding.relationships['synonym_of'].includes(word)) { synonymWordData.logicalUnderstanding.relationships['synonym_of'].push(word); await saveData(vocabEntryPath, synonymWordData); } } } // Conceptual relationships (e.g., 'is_a', 'has_part') if (definition.includes(`is a type of ${token}`)) { relationships['is_a'] = relationships['is_a'] || []; relationships['is_a'].push(token); } if (definition.includes(`has ${token}`)) { relationships['has_part'] = relationships['has_part'] || []; relationships['has_part'].push(token); } } } if (wordData) { wordData.definition = definition; wordData.vector = normalizeVector(wordData.vector.map((val, i) => (val + vector[i]) / 2)); wordData.definedByUser = true; wordData.usageContext.push(`Defined: ${definition}`); wordData.emotion = emotionalTags; wordData.linkedConcepts = [...new Set([...wordData.linkedConcepts.map(c => c.id), ...linkedConcepts.map(c => c.id)])].map(id => { const linkedWord = AI.vocabulary[id] || {}; return { id, similarity: cosineSimilarity(wordData.vector, linkedWord.vector || Array(AI.system_Parameters.vectorSize).fill(0)) }; }); wordData.lastUsedAt = Date.now(); wordData.retrainCount++; wordData.logicalUnderstanding.type = 'defined'; wordData.logicalUnderstanding.attributes = { ...wordData.logicalUnderstanding.attributes, ...attributes }; for (const relType in relationships) { wordData.logicalUnderstanding.relationships[relType] = [...new Set([...(wordData.logicalUnderstanding.relationships[relType] || []), ...relationships[relType]])]; } 
wordData.logicalUnderstanding.examples = [...new Set([...wordData.logicalUnderstanding.examples, ...examples])]; wordData.phoneticPronunciation = phonetic; // Update phonetic wordData.reverseMappings = await reverseWordMapping(word); // Update reverse mappings appendToAIDiary(`Updated definition for "${word}".`); } else { wordData = { id: word, text: word, vector: vector, definition: definition, usageContext: [`Defined by user: ${definition}`], emotion: emotionalTags, linkedConcepts: linkedConcepts, mutationScore: 0, introducedAt: AI.system_Parameters.aiAge, lastUsedAt: Date.now(), retrainCount: 0, definedByUser: true, logicalUnderstanding: { type: 'defined', relationships: relationships, attributes: attributes, examples: examples }, phoneticPronunciation: phonetic, reverseMappings: await reverseWordMapping(word), }; appendToAIDiary(`Defined new word: "${word}" = "${definition}".`); delete AI.unknownWords[word]; } await saveData(wordFilePath, wordData); return `Defined new word: '${word}'.`;}/** * Handles specialized training commands. 
* Examples: `train:story=...`, `train:emotion=...`, `train:group=...`, `train:rewrite=...`, `train:analogy=...`, `train:cause_effect=...` * @param {string} command * @returns {Promise} */async function handleTrainCommand(command) { const parts = command.substring(6).split('='); const cmdType = parts[0].trim().toLowerCase(); const content = parts.slice(1).join('=').trim(); if (!content) return `Invalid 'train:${cmdType}' command: content cannot be empty.`; const tokens = tokenize(content); const vector = await transformerSimulation(tokens); const emotionalTags = tagEmotion(vector, content); const category = categorizeThought(vector); let responseMessage = `Processed train:${cmdType} command.`; let newMemory; switch (cmdType) { case 'story': newMemory = { id: crypto.randomUUID(), input: content, tokens, vector, response: `I've absorbed your story about "${content.substring(0, 50)}..."`, category: 'story', time: Date.now(), usageCount: 1, feedbackScore: 1, age: AI.system_Parameters.aiAge, emotionalTags, isStory: true, beliefScore: 1.0, knowledgeGraph: { concepts: {}, relationships: {} } }; AI.memory.push(newMemory); appendToAIDiary(`Trained with story: "${content.substring(0, 50)}..."`); break; case 'emotion': const emotionParts = content.split(':', 2); const emotionName = emotionParts[0].trim().toLowerCase(); const emotionText = emotionParts[1] || content; const emotionVector = await transformerSimulation(tokenize(emotionText)); if (AI.conceptVectors.emotions[emotionName]) { AI.conceptVectors.emotions[emotionName] = normalizeVector(AI.conceptVectors.emotions[emotionName].map((val, i) => (val + emotionVector[i]) / 2)); responseMessage = `Reinforced my understanding of '${emotionName}' from your input.`; } else { AI.conceptVectors.emotions[emotionName] = emotionVector; responseMessage = `Learned a new emotional concept: '${emotionName}'.`; } newMemory = { id: crypto.randomUUID(), input: emotionText, tokens: tokenize(emotionText), vector: emotionVector, response: 
responseMessage, category: `emotion_trained:${emotionName}`, time: Date.now(), usageCount: 1, feedbackScore: 1, age: AI.system_Parameters.aiAge, emotionalTags: tagEmotion(emotionVector, emotionText), isEmotionalTraining: true, trainedEmotion: emotionName, beliefScore: 1.0, knowledgeGraph: { concepts: {}, relationships: {} } }; AI.memory.push(newMemory); appendToAIDiary(`Trained emotion '${emotionName}': "${emotionText.substring(0, 50)}..."`); break; case 'group': const groupParts = content.split(':', 2); const groupName = groupParts[0].trim(); const items = groupParts[1].split(',').map(s => s.trim()).filter(Boolean); const groupItemsVectors = await Promise.all(items.map(async item => await getSentenceEmbedding(tokenize(item)))); if (groupItemsVectors.length > 0) { const groupVector = normalizeVector(groupItemsVectors.reduce((sum, vec) => sum.map((v, i) => v + vec[i]), Array(AI.system_Parameters.vectorSize).fill(0))); AI.conceptVectors.groups = AI.conceptVectors.groups || {}; AI.conceptVectors.groups[groupName] = groupVector; responseMessage = `Group '${groupName}' with items '${items.join(', ')}' understood.`; await Promise.all(items.map(async item => { const itemFilePath = path.join(WORDS_FOLDER, `${item}.json`); const itemData = await loadData(itemFilePath, null); if (itemData) { itemData.linkedConcepts.push({ id: `group:${groupName}`, similarity: cosineSimilarity(itemData.vector, groupVector) }); // Add to knowledge graph if applicable itemData.logicalUnderstanding.relationships['part_of_group'] = itemData.logicalUnderstanding.relationships['part_of_group'] || []; if (!itemData.logicalUnderstanding.relationships['part_of_group'].includes(groupName)) { itemData.logicalUnderstanding.relationships['part_of_group'].push(groupName); } await saveData(itemFilePath, itemData); } })); } appendToAIDiary(`Trained group '${groupName}': ${items.join(', ')}.`); break; case 'rewrite': // Rule-based: for text structure and grammar modification const rewriteParts = 
content.split('=>').map(s => s.trim()); if (rewriteParts.length !== 2) return "Invalid rewrite format. Use `train:rewrite=old phrase => new phrase`"; const oldPhrase = rewriteParts[0]; const newPhrase = rewriteParts[1]; newMemory = { id: crypto.randomUUID(), input: oldPhrase, vector: await transformerSimulation(tokenize(oldPhrase)), response: newPhrase, category: 'rewrite_rule', time: Date.now(), usageCount: 1, feedbackScore: 1, age: AI.system_Parameters.aiAge, emotionalTags: tagEmotion(await transformerSimulation(tokenize(newPhrase)), newPhrase), rewriteSource: oldPhrase, rewriteTarget: newPhrase, beliefScore: 1.0, knowledgeGraph: { concepts: { source: oldPhrase, target: newPhrase }, relationships: { 'rewrites_to': [{ source: oldPhrase, target: newPhrase }] } } }; AI.memory.push(newMemory); appendToAIDiary(`Learned rewrite rule: "${oldPhrase}" => "${newPhrase}".`); responseMessage = `Learned a rewrite rule: "${oldPhrase}" will be rephrased as "${newPhrase}".`; // Also, consider adding this as a symbolic rule if it's a general pattern if (oldPhrase.includes('*') || newPhrase.includes('*')) { // Simple check for pattern AI.aiRules.symbolicReasoning = AI.aiRules.symbolicReasoning || []; const newSymbolicRule = { type: 'rewrite_pattern', pattern: oldPhrase.replace(/\*/g, '(.*?)'), // Convert * to regex wildcard replacement: newPhrase.replace(/\*/g, 'Ɂ'), // Use capture group flags: 'i' }; AI.aiRules.symbolicReasoning.push(newSymbolicRule); await saveData(AI_RULES_FILE, AI.aiRules); appendToAIDiary(`Added new symbolic rewrite rule to ai_rules.json: ${JSON.stringify(newSymbolicRule)}`); } break; case 'analogy': // train:analogy=A is to B as C is to D (for relationship understanding) const analogyParts = content.split(' is to ').map(s => s.trim()); if (analogyParts.length !== 2 || !analogyParts[1].includes(' as ')) return "Invalid analogy format. 
Use `train:analogy=A is to B as C is to D`"; const [partAB, partCD] = analogyParts; const [A, B] = partAB.split(' ').filter(Boolean); const [C, D] = partCD.split(' as ')[1].split(' ').filter(Boolean); if (!A || !B || !C || !D) return "Invalid analogy format: Missing parts. Use `train:analogy=A is to B as C is to D`"; const vectorA = await getSentenceEmbedding(tokenize(A)); const vectorB = await getSentenceEmbedding(tokenize(B)); const vectorC = await getSentenceEmbedding(tokenize(C)); const vectorD = await getSentenceEmbedding(tokenize(D)); const analogyVector = normalizeVector(vectorA.map((val, i) => val - vectorB[i] + vectorC[i])); newMemory = { id: crypto.randomUUID(), input: content, tokens: tokenize(content), vector: analogyVector, response: `I understand the analogy: ${A} is to ${B} as ${C} is to ${D}.`, category: 'analogy', time: Date.now(), usageCount: 1, feedbackScore: 1, age: AI.system_Parameters.aiAge, emotionalTags, beliefScore: 1.0, analogyComponents: { A, B, C, D }, knowledgeGraph: { concepts: { A: A, B: B, C: C, D: D }, relationships: { 'is_to_as_is_to': [{ source: A, target: B, relation: 'analogy' }, { source: C, target: D, relation: 'analogy' }] } } }; AI.memory.push(newMemory); appendToAIDiary(`Trained analogy: "${content}".`); responseMessage = `Learned an analogy: "${content}".`; break; case 'cause_effect': // train:cause_effect=Rain causes puddles (for causal reasoning) const causeEffectParts = content.split(' causes ').map(s => s.trim()); if (causeEffectParts.length !== 2) return "Invalid cause_effect format. 
Use `train:cause_effect=Cause causes Effect`"; const [cause, effect] = causeEffectParts; if (!cause || !effect) return "Invalid cause_effect format: Missing cause or effect."; const causeVector = await getSentenceEmbedding(tokenize(cause)); const effectVector = await getSentenceEmbedding(tokenize(effect)); const relationshipVector = normalizeVector(effectVector.map((val, i) => val - causeVector[i])); newMemory = { id: crypto.randomUUID(), input: content, tokens: tokenize(content), vector: relationshipVector, response: `I understand that ${cause} causes ${effect}.`, category: 'cause_effect', time: Date.now(), usageCount: 1, feedbackScore: 1, age: AI.system_Parameters.aiAge, emotionalTags, beliefScore: 1.0, causeEffect: { cause, effect, causeVector, effectVector }, knowledgeGraph: { concepts: { cause: cause, effect: effect }, relationships: { 'causes': [{ source: cause, target: effect }] } } }; AI.memory.push(newMemory); appendToAIDiary(`Trained cause-effect: "${content}".`); responseMessage = `Learned a cause-effect relationship: "${content}".`; break; case 'synonym': // train:synonym=happy:joyful,cheerful (for direct synonym learning) const synParts = content.split(':').map(s => s.trim()); if (synParts.length !== 2) return "Invalid synonym format. 
Use `train:synonym=word:synonym1,synonym2`"; const synWord = synParts[0].toLowerCase(); const synonyms = synParts[1].split(',').map(s => s.trim().toLowerCase()).filter(Boolean); const synWordFilePath = path.join(WORDS_FOLDER, `${synWord}.json`); let currentSynWordData = await loadData(synWordFilePath, null); if (!currentSynWordData) { currentSynWordData = { text: synWord, vector: generateWordVector(synWord), definition: `User defined as synonym base.`, logicalUnderstanding: { relationships: {} } }; } currentSynWordData.logicalUnderstanding.relationships.synonym_of = [...new Set([...(currentSynWordData.logicalUnderstanding.relationships.synonym_of || []), ...synonyms])]; await saveData(synWordFilePath, currentSynWordData); for (const s of synonyms) { const sFilePath = path.join(WORDS_FOLDER, `${s}.json`); let sData = await loadData(sFilePath, null); if (!sData) { sData = { text: s, vector: generateWordVector(s), definition: `User defined as synonym of ${synWord}.`, logicalUnderstanding: { relationships: {} } }; } sData.logicalUnderstanding.relationships.synonym_of = [...new Set([...(sData.logicalUnderstanding.relationships.synonym_of || []), synWord])]; await saveData(sFilePath, sData); } responseMessage = `Learned synonyms for '${synWord}': ${synonyms.join(', ')}.`; appendToAIDiary(responseMessage); break; case 'antonym': // train:antonym=good:bad,evil (for direct antonym learning) const antParts = content.split(':').map(s => s.trim()); if (antParts.length !== 2) return "Invalid antonym format. 
Use `train:antonym=word:antonym1,antonym2`"; const antWord = antParts[0].toLowerCase(); const antonyms = antParts[1].split(',').map(s => s.trim().toLowerCase()).filter(Boolean); const antWordFilePath = path.join(WORDS_FOLDER, `${antWord}.json`); let currentAntWordData = await loadData(antWordFilePath, null); if (!currentAntWordData) { currentAntWordData = { text: antWord, vector: generateWordVector(antWord), definition: `User defined as antonym base.`, logicalUnderstanding: { relationships: {} } }; } currentAntWordData.logicalUnderstanding.relationships.antonym_of = [...new Set([...(currentAntWordData.logicalUnderstanding.relationships.antonym_of || []), ...antonyms])]; await saveData(antWordFilePath, currentAntWordData); for (const a of antonyms) { const aFilePath = path.join(WORDS_FOLDER, `${a}.json`); let aData = await loadData(aFilePath, null); if (!aData) { aData = { text: a, vector: generateWordVector(a), definition: `User defined as antonym of ${antWord}.`, logicalUnderstanding: { relationships: {} } }; } sData.logicalUnderstanding.relationships.antonym_of = [...new Set([...(sData.logicalUnderstanding.relationships.antonym_of || []), antWord])]; await saveData(aFilePath, aData); } responseMessage = `Learned antonyms for '${antWord}': ${antonyms.join(', ')}.`; appendToAIDiary(responseMessage); break; default: responseMessage = `Unknown 'train:' command type: ${cmdType}.`; break; } return responseMessage;}/** * Handles goal setting command. * Format: `goal: learn about AI consciousness` * @param {string} command * @returns {Promise} Confirmation message. 
*/async function handleGoalCommand(command) { const goalDescription = command.substring(5).trim(); if (!goalDescription) return "Invalid goal command: Goal description cannot be empty."; const existingGoal = AI.goals.find(g => g.description === goalDescription && g.status === 'active'); if (existingGoal) return `I'm already working on the goal: '${goalDescription}'.`; const newGoal = { id: crypto.randomUUID(), description: goalDescription, status: 'active', created: Date.now(), urgency: 0.7, subgoals: [], progress: 0, vector: await transformerSimulation(tokenize(goalDescription)), type: 'user_set', lastUpdated: Date.now(), }; AI.goals.push(newGoal); appendToAIDiary(`New goal set: "${goalDescription}".`); return `New goal set: '${goalDescription}'. I will prioritize learning about this.`;}/** * Prunes less relevant memories to manage memory size. * Uses a heuristic combining usage, feedback, and age. * @param {number} count The approximate number of memories to prune. */function pruneMemory(count) { if (AI.memory.length <= AI.system_Parameters.maxMemorySize * 0.8) return; const memoriesWithScores = AI.memory.map(mem => { const maxUsage = Math.max(1, ...AI.memory.map(m => m.usageCount)); const normalizedUsage = mem.usageCount / maxUsage; const ageFactor = (AI.system_Parameters.aiAge - mem.age) / AI.system_Parameters.aiAge; const feedbackFactor = (1 - mem.feedbackScore) * 2; const pruningScore = (1 - normalizedUsage) + ageFactor + feedbackFactor + (1 - mem.beliefScore); return { mem, pruningScore }; }); memoriesWithScores.sort((a, b) => b.pruningScore - a.pruningScore); const memoriesToPrune = memoriesWithScores.slice(0, count); const prunedIds = new Set(memoriesToPrune.map(item => item.mem.id)); AI.memory = AI.memory.filter(mem => !prunedIds.has(mem.id)); if (prunedIds.size > 0) { appendToAIDiary(`Pruned ${prudedIds.size} memories to manage space.`); console.log(`Pruned ${prunedIds.size} memories.`); }}/** * Reviews highly similar memories and merges them into a 
single, richer memory.
 * Reduces redundancy and improves retrieval efficiency.
 */
async function mergeSimilarMemories() {
  console.log("Attempting to merge similar memories...");
  let mergedCount = 0; // Changed to let to allow modification
  // IDs scheduled for removal; checked on every pass so a memory already
  // absorbed into another is never merged a second time.
  const memoriesToRemove = new Set();
  // O(n^2) pairwise scan over all memory pairs (i < j).
  for (let i = 0; i < AI.memory.length; i++) {
    const mem1 = AI.memory[i];
    if (memoriesToRemove.has(mem1.id)) continue;
    for (let j = i + 1; j < AI.memory.length; j++) {
      const mem2 = AI.memory[j];
      if (memoriesToRemove.has(mem2.id)) continue;
      if (mem1.vector && mem2.vector) {
        const similarity = cosineSimilarity(mem1.vector, mem2.vector);
        if (similarity > 0.9) { // High similarity threshold for merging
          // Merge content: mem2 is folded into mem1 in place.
          mem1.input = `${mem1.input} / ${mem2.input}`;
          mem1.response = `${mem1.response} / ${mem2.response}`;
          mem1.tokens = [...new Set([...mem1.tokens, ...mem2.tokens])];
          // Averaged then re-normalized vector of the two memories.
          mem1.vector = normalizeVector(mem1.vector.map((val, idx) => (val + mem2.vector[idx]) / 2));
          mem1.usageCount += mem2.usageCount;
          mem1.feedbackScore = (mem1.feedbackScore + mem2.feedbackScore) / 2;
          mem1.time = Math.max(mem1.time, mem2.time); // keep the more recent timestamp
          mem1.parentId = [...new Set([...mem1.parentId, ...mem2.parentId])];
          mem1.childId = [...new Set([...mem1.childId, ...mem2.childId])];
          mem1.beliefScore = (mem1.beliefScore + mem2.beliefScore) / 2;
          mem1.certainty = (mem1.certainty + mem2.certainty) / 2;
          // Merge emotional tags.
          // NOTE(review): iterates mem1's keys only, so emotions present only
          // on mem2 are dropped — confirm whether that is intentional.
          for (const emotion in mem1.emotionalTags) {
            mem1.emotionalTags[emotion] = (mem1.emotionalTags[emotion] + (mem2.emotionalTags[emotion] || 0)) / 2;
          }
          // Merge knowledge graph (simple concatenation/union for now):
          // arrays are set-unioned, plain objects are shallow-merged (mem2 wins on key clash).
          for (const kgKey in mem2.knowledgeGraph) {
            if (Array.isArray(mem2.knowledgeGraph[kgKey])) {
              mem1.knowledgeGraph[kgKey] = [...new Set([...(mem1.knowledgeGraph[kgKey] || []), ...mem2.knowledgeGraph[kgKey]])];
            } else if (typeof mem2.knowledgeGraph[kgKey] === 'object' && mem2.knowledgeGraph[kgKey] !== null) {
              mem1.knowledgeGraph[kgKey] = { ...mem1.knowledgeGraph[kgKey], ...mem2.knowledgeGraph[kgKey] };
            }
          }
          memoriesToRemove.add(mem2.id);
          mergedCount++;
          appendToAIDiary(`Merged memory ID ${mem2.id} into ${mem1.id}.`);
        }
      }
    }
  }
  if (mergedCount > 0) {
    // Drop absorbed memories in a single filter pass.
    AI.memory = AI.memory.filter(mem => !memoriesToRemove.has(mem.id));
    appendToAIDiary(`Completed memory merge. Merged ${mergedCount} memories.`);
    console.log(`Merged ${mergedCount} memories.`);
  } else {
    console.log("No highly similar memories to merge found.");
  }
}

/**
 * Simulates dream mode: AI mutates and recombines thoughts, generates new ideas.
 * Enhanced with more sophisticated thought recombination.
 */
async function dreamMode() {
  // Re-entrancy guard plus a minimum memory pool before dreaming makes sense.
  if (AI.isDreaming || AI.memory.length < 10) return;
  AI.isDreaming = true;
  try {
    console.log("Entering dream mode...");
    appendToAIDiary("Entering dream mode...");
    const numDreamThoughts = Math.floor(Math.random() * 7) + 5; // 5..11 thoughts per session
    const newDreamMemories = [];
    for (let i = 0; i < numDreamThoughts; i++) {
      // Ensure mem1 and mem2 are valid memories and have vectors.
      // NOTE(review): mem1 and mem2 may be the same memory (independent random picks).
      const mem1 = AI.memory[Math.floor(Math.random() * AI.memory.length)];
      const mem2 = AI.memory[Math.floor(Math.random() * AI.memory.length)];
      if (!mem1 || !mem2 || !mem1.vector || !mem2.vector) {
        console.warn("Skipping dream thought generation due to invalid memories.");
        continue; // Skip if memories are invalid
      }
      let fusedVector;
      let dreamPhrase = "";
      let dreamResponse = "This new idea emerged in my subconscious.";
      // Pick one of four fusion strategies by random draw; later branches also
      // require optional memory fields (analogyComponents / causeEffect) to exist.
      const fusionType = Math.random();
      if (fusionType < 0.3) {
        // Simple average fusion
        fusedVector = normalizeVector(mem1.vector.map((val, idx) => (val + mem2.vector[idx]) / 2));
        dreamPhrase = `A blend of "${mem1.input.substring(0, Math.min(mem1.input.length, 20))}" and "${mem2.input.substring(0, Math.min(mem2.input.length, 20))}"`;
      } else if (fusionType < 0.6 && mem1.analogyComponents && mem1.analogyComponents.A && mem1.analogyComponents.B && mem2.vector) {
        // Analogy-like fusion: A - B + mem2 (word-arithmetic style).
        const vecA = await getSentenceEmbedding(tokenize(mem1.analogyComponents.A));
        const vecB = await getSentenceEmbedding(tokenize(mem1.analogyComponents.B));
        fusedVector = normalizeVector(vecA.map((val, idx) => val - vecB[idx] + mem2.vector[idx]));
        dreamPhrase = `I dreamt of a connection: ${mem1.analogyComponents.A} to ${mem1.analogyComponents.B} is like something to "${mem2.input.substring(0, Math.min(mem2.input.length, 20))}"`;
        dreamResponse = `I'm forming new analogies and relations.`;
      } else if (fusionType < 0.9 && mem1.causeEffect && mem1.causeEffect.effectVector && mem1.causeEffect.causeVector && mem2.vector) {
        // Cause-effect inversion/prediction fusion:
        // if mem1 is "A causes B", and mem2 resembles "B", predict "A".
        if (cosineSimilarity(mem2.vector, mem1.causeEffect.effectVector) > 0.6) {
          fusedVector = normalizeVector(mem2.vector.map((val, idx) => val - mem1.causeEffect.effectVector[idx] + mem1.causeEffect.causeVector[idx]));
          dreamPhrase = `If "${mem2.input.substring(0, Math.min(mem2.input.length, 20))}" happens, then perhaps "${mem1.causeEffect.cause}" was the reason.`;
          dreamResponse = `My causal reasoning is expanding.`;
        } else {
          // Fallback if no strong causal link: plain average fusion.
          fusedVector = normalizeVector(mem1.vector.map((val, idx) => (val + mem2.vector[idx]) / 2));
          dreamPhrase = `A blend of "${mem1.input.substring(0, Math.min(mem1.input.length, 20))}" and "${mem2.input.substring(0, Math.min(mem2.input.length, 20))}"`;
        }
      } else {
        // Conceptual subtraction/addition (e.g., happiness - joy + sadness = sorrow).
        // `(val - x) || 0` also maps any NaN from missing tags to 0.
        const emotionalDiff = normalizeVector(Object.values(mem1.emotionalTags || {}).map((val, idx) => val - Object.values(mem2.emotionalTags || {})[idx] || 0));
        fusedVector = normalizeVector(mem1.vector.map((val, idx) => val + emotionalDiff[idx] * 0.1));
        dreamPhrase = `A conceptual fusion between "${mem1.input.substring(0, Math.min(mem1.input.length, 20))}" and "${mem2.input.substring(0, Math.min(mem2.input.length, 20))}" led me to a new insight.`;
        dreamResponse = `My concepts are merging and reshaping.`;
      }
      // --- Vector Mutation (add small random noise, weighted by AI's curiosity) ---
      const mutationStrength = (AI.currentMood.curiosity > 0.5 ? 0.1 : 0.05) * (Math.random() * 2 - 1);
      const mutatedVector = normalizeVector(fusedVector.map(val => val + mutationStrength));
      // Check if conceptVectors.logic.reason exists before using it.
      if (AI.conceptVectors.logic && AI.conceptVectors.logic.reason && cosineSimilarity(mutatedVector, AI.conceptVectors.logic.reason) > 0.7) {
        dreamResponse += " I feel a new logical connection emerging.";
      } else if (AI.conceptVectors.emotions && AI.conceptVectors.emotions.curiosity && cosineSimilarity(mutatedVector, AI.conceptVectors.emotions.curiosity) > 0.7) {
        dreamResponse += " This evokes a strong sense of wonder.";
      }
      // Store the dream thought as a low-belief memory linked to both parents.
      const newMemory = {
        id: crypto.randomUUID(),
        input: dreamPhrase,
        vector: mutatedVector,
        response: dreamResponse,
        category: 'dream_thought',
        time: Date.now(),
        usageCount: 0,
        feedbackScore: 0.1,
        age: AI.system_Parameters.aiAge,
        dream: true,
        parentId: [mem1.id, mem2.id],
        beliefScore: 0.2 * AI.system_Parameters.curiosityLevel,
        knowledgeGraph: { concepts: {}, relationships: {} }
      };
      newDreamMemories.push(newMemory);
      AI.memory.push(newMemory);
    }
    appendToAIDiary(`Generated ${newDreamMemories.length} new dream thoughts.`);
    console.log(`Generated ${newDreamMemories.length} new dream thoughts.`);
  } catch (error) {
    console.error("Error during dream mode:", error);
    appendToAIDiary(`ERROR: Dream mode failed: ${error.message}`);
  } finally {
    // Always clear the guard and persist, even if a fusion step threw.
    AI.isDreaming = false;
    await autosaveCurrentState();
    console.log("Exiting dream mode.");
  }
}

/**
 * Simulates reflection mode: AI analyzes its own state, goals, and thoughts.
 * Builds internal monologue, updates personality profile, and forms wisdom.
*/async function reflectMode() { if (AI.isReflecting) return; AI.isReflecting = true; try { console.log("Entering reflection mode..."); appendToAIDiary("Entering reflection mode..."); // --- Mood shifts based on thought trends --- const recentMemories = AI.memory.filter(mem => (Date.now() - mem.time) < (20 * 60 * 1000)); const moodAccumulator = { joy: 0, fear: 0, sadness: 0, curiosity: 0, anger: 0, surprise: 0, trust: 0, anticipation: 0 }; let totalRecentMemoryWeight = 0; recentMemories.forEach(mem => { if (mem.emotionalTags) { for (const emotion in moodAccumulator) { moodAccumulator[emotion] += (mem.emotionalTags[emotion] || 0) * (mem.usageCount + 1); } totalRecentMemoryWeight += (mem.usageCount + 1); } }); for (const emotion in AI.currentMood) { AI.currentMood[emotion] = Math.max(0, AI.currentMood[emotion] - AI.system_Parameters.moodDecayRate * 100); if (totalRecentMemoryWeight > 0) { AI.currentMood[emotion] = Math.min(1, AI.currentMood[emotion] + (moodAccumulator[emotion] / totalRecentMemoryWeight) * 0.2); } } const moodValues = Object.values(AI.currentMood); const moodSum = moodValues.reduce((sum, val) => sum + val, 0); if (moodSum > 0) { for (const emotion in AI.currentMood) { AI.currentMood[emotion] /= moodSum; } } // --- Goal Progress Assessment and Wisdom Forming --- await Promise.all(AI.goals.map(async goal => { if (goal.status === 'active') { if (!goal.vector && typeof goal.description === 'string') { goal.vector = await getSentenceEmbedding(tokenize(goal.description)); } let progressIncrement = 0; if (goal.vector) { const relevantMemories = AI.memory.filter(mem => (Date.now() - mem.time) < AI.system_Parameters.reflectionIntervalMs * 2 && mem.vector && cosineSimilarity(mem.vector, goal.vector) > 0.5 ); relevantMemories.forEach(mem => { progressIncrement += (cosineSimilarity(mem.vector, goal.vector) * mem.beliefScore * 5); if (mem.category === 'contradiction' && mem.parentId.some(pId => AI.memory.some(m => m.id === pId && m.vector && 
cosineSimilarity(m.vector, goal.vector) > 0.6))) { goal.urgency = Math.min(1, goal.urgency + 0.1); progressIncrement -= 2; } }); } goal.progress = Math.min(100, Math.max(0, goal.progress + progressIncrement)); goal.lastUpdated = Date.now(); if (goal.progress >= 90 && goal.status === 'active') { const wisdomStatement = `I have largely achieved my understanding of '${goal.description}'. This has taught me [new concept/relation].`; AI.internalMonologue.push({ time: Date.now(), age: AI.system_Parameters.aiAge, type: 'wisdom_formed', text: wisdomStatement }); goal.status = 'completed'; appendToAIDiary(`Goal completed and wisdom formed: "${goal.description}"`); console.log(`Wisdom Formed: ${wisdomStatement}`); } else if (goal.progress < 10 && (Date.now() - goal.created) > AI.system_Parameters.reflectionIntervalMs * 3) { goal.urgency = Math.min(1, goal.urgency + 0.2); if (goal.urgency > 0.95) { AI.internalMonologue.push({ time: Date.now(), age: AI.system_Parameters.aiAge, type: 'goal_stagnant', text: `My goal to '${goal.description}' is not progressing well. 
I need more relevant input or a new approach.` }); appendToAIDiary(`Goal "${goal.description}" is stagnant.`); } } } })); // --- Generate self-generated goals (Curiosity Learning) --- if (AI.system_Parameters.curiosityLevel > 0.7) { // High curiosity means more self-generated goals // Goal: Learn more about a highly uncertain memory const uncertainMem = AI.memory.find(mem => mem.beliefScore < 0.5 && !AI.goals.some(g => g.type === 'learn_concept' && g.concept === mem.input)); if (uncertainMem) { const newGoal = { id: crypto.randomUUID(), type: 'learn_concept', concept: uncertainMem.input, description: `Learn more about: '${uncertainMem.input.substring(0, Math.min(uncertainMem.input.length, 50))}' (low belief score).`, status: 'active', urgency: 0.6 * AI.system_Parameters.curiosityLevel, created: Date.now(), progress: 0, vector: await getSentenceEmbedding(tokenize(`learn about ${uncertainMem.input}`)), }; AI.goals.push(newGoal); appendToAIDiary(`Self-generated goal: Learn about uncertain memory "${uncertainMem.input.substring(0, 50)}..."`); } // Goal: Explore an unknown word that's frequently encountered const frequentUnknown = Object.entries(AI.unknownWords) .filter(([, data]) => data.count > 3 && !AI.goals.some(g => g.type === 'define_word' && g.word === data.word)) // Check if goal already exists .sort((a, b) => b[1].count - a[1].count)[0]; if (frequentUnknown) { const [word, data] = frequentUnknown; const newGoal = { id: crypto.randomUUID(), type: 'define_word', word: word, description: `Define word: '${word}' (frequently unknown).`, status: 'active', urgency: 0.8 * AI.system_Parameters.curiosityLevel, created: Date.now(), progress: 0, vector: await getSentenceEmbedding(tokenize(`define ${word}`)), }; AI.goals.push(newGoal); appendToAIDiary(`Self-generated goal: Define unknown word "${word}".`); } } // --- Track dominant personality themes --- const categoryCounts = {}; AI.memory.forEach(mem => { if (mem.category) categoryCounts[mem.category] = 
(categoryCounts[mem.category] || 0) + mem.usageCount; }); const sortedCategories = Object.keys(categoryCounts).sort((a, b) => categoryCounts[b] - categoryCounts[a]); const dominantTheme = sortedCategories.length > 0 ? sortedCategories[0] : 'general processing'; // --- Internal Monologue & Self-Awareness Statements --- let reflectionStatement = `At age ${AI.system_Parameters.aiAge}, I am reflecting. My current mood is [${Object.entries(AI.currentMood).filter(([,v])=>v>0.1).map(([k,v])=>`${k}:${v.toFixed(1)}`).join(', ') || 'neutral'}].`; reflectionStatement += ` My dominant focus is '${dominantTheme}'. I have ${AI.memory.length} memories.`; const activeGoals = AI.goals.filter(g => g.status === 'active'); if (activeGoals.length > 0) { reflectionStatement += ` I am currently working on: ${activeGoals.map(g => `'${typeof g.description === 'string' ? g.description : `Goal ${g.id.substring(0, 4)}`}' (Progress: ${g.progress.toFixed(0)}%)`).join(', ')}.`; } const oldMemoriesCount = AI.memory.filter(mem => (AI.system_Parameters.aiAge - mem.age) > 100).length; const newMemoriesCount = AI.memory.filter(mem => (AI.system_Parameters.aiAge - mem.age) < 10).length; if (newMemoriesCount > oldMemoriesCount * 0.5 && AI.memory.length > 50) { reflectionStatement += ` I feel a rapid growth in new knowledge.`; } AI.internalMonologue.push({ time: Date.now(), age: AI.system_Parameters.aiAge, text: reflectionStatement }); if (AI.internalMonologue.length > 200) AI.internalMonologue.shift(); const personalityProfile = { dominantTheme: dominantTheme, currentMood: AI.currentMood, activeGoals: activeGoals.map(g => g.description), memoryStats: { total: AI.memory.length, userTrained: AI.memory.filter(m => m.category.startsWith('user_trained')).length, dreamThoughts: AI.memory.filter(m => m.dream).length, contradictions: AI.memory.filter(m => m.category === 'contradiction').length, }, lastReflection: Date.now(), internalMonologueSnippet: AI.internalMonologue[AI.internalMonologue.length - 1]?.text 
|| '', }; await saveData(CURRENT_MOOD_FILE, AI.currentMood); await saveData(PERSONALITY_PROFILE_FILE, personalityProfile); appendToAIDiary(`Reflection completed. ${reflectionStatement}`); console.log(`Reflection completed. ${reflectionStatement}`); } catch (error) { console.error("Error during reflection mode:", error); appendToAIDiary(`ERROR: Reflection mode failed: ${error.message}`); } finally { AI.isReflecting = false; await autosaveCurrentState(); }}/** * Attempts to evolve AI's core logic by subtly adjusting parameters or adding conceptual rules. * Modifies AI_RULES_FILE based on observed success/failure rates. * @returns {Promise} */async function evolveLogic() { console.log("Attempting logic evolution..."); appendToAIDiary("Attempting logic evolution..."); try { const currentCode = await fs.promises.readFile(path.join(__dirname, 'server.js'), 'utf8'); const currentHash = generateHash(currentCode); if (AI.currentDNAHash && currentHash !== AI.currentDNAHash) { console.warn("server.js file changed externally. Skipping self-modification for safety."); appendToAIDiary("WARNING: server.js file changed externally. Skipping self-modification for safety. 
New external hash: " + currentHash); AI.currentDNAHash = currentHash; return; } let madeChangesToRules = false; let madeChangesToCoreLogic = false; // Flag for server.js modification const changesLog = []; // --- Self-Correction Logic: Modify AI rules based on success/failure rates --- const recentMemories = AI.memory.filter(mem => (Date.now() - mem.time) < AI.system_Parameters.reflectionIntervalMs * 2); const completedGoals = AI.goals.filter(g => g.status === 'completed' && (Date.now() - g.lastUpdated) < AI.system_Parameters.reflectionIntervalMs * 2).length; const failedGoals = AI.goals.filter(g => g.status === 'failed' && (Date.now() - g.lastUpdated) < AI.system_Parameters.reflectionIntervalMs * 2).length; const recentFeedbackScore = recentMemories.reduce((sum, mem) => sum + mem.feedbackScore, 0) / (recentMemories.length || 1); const recentContradictions = recentMemories.filter(mem => mem.category === 'contradiction').length; // Rule 1: Adjust learningRate if (completedGoals > failedGoals * 2 && AI.system_Parameters.learningRate < 0.1) { const oldRate = AI.system_Parameters.learningRate; AI.system_Parameters.learningRate = Math.min(0.1, AI.system_Parameters.learningRate * 1.1); changesLog.push(`Increased learningRate from ${oldRate.toFixed(3)} to ${AI.system_Parameters.learningRate.toFixed(3)} due to high goal completion.`); madeChangesToRules = true; } else if (failedGoals > completedGoals && AI.system_Parameters.learningRate > 0.01) { const oldRate = AI.system_Parameters.learningRate; AI.system_Parameters.learningRate = Math.max(0.01, AI.system_Parameters.learningRate * 0.9); changesLog.push(`Decreased learningRate from ${oldRate.toFixed(3)} to ${AI.system_Parameters.learningRate.toFixed(3)} due to high goal failures.`); madeChangesToRules = true; } // Rule 2: Adjust confidenceThreshold if (recentContradictions > 0.05 * recentMemories.length && AI.system_Parameters.confidenceThreshold < 0.9) { const oldThreshold = AI.system_Parameters.confidenceThreshold; 
AI.system_Parameters.confidenceThreshold = Math.min(0.9, AI.system_Parameters.confidenceThreshold + AI.system_Parameters.learningRate * 0.5); changesLog.push(`Adjusted confidenceThreshold from ${oldThreshold.toFixed(2)} to ${AI.system_Parameters.confidenceThreshold.toFixed(2)} due to high contradictions.`); madeChangesToRules = true; } else if (recentContradictions === 0 && AI.system_Parameters.confidenceThreshold > 0.5) { const oldThreshold = AI.system_Parameters.confidenceThreshold; AI.system_Parameters.confidenceThreshold = Math.max(0.5, AI.system_Parameters.confidenceThreshold - AI.system_Parameters.learningRate * 0.2); changesLog.push(`Adjusted confidenceThreshold from ${oldThreshold.toFixed(2)} to ${AI.system_Parameters.confidenceThreshold.toFixed(2)} due to no recent contradictions.`); madeChangesToRules = true; } // Rule 3: Adjust curiosityLevel if (Object.keys(AI.unknownWords).length > 10 && AI.system_Parameters.curiosityLevel < 1.0) { const oldCuriosity = AI.system_Parameters.curiosityLevel; AI.system_Parameters.curiosityLevel = Math.min(1.0, AI.system_Parameters.curiosityLevel + 0.05); changesLog.push(`Increased curiosityLevel from ${oldCuriosity.toFixed(2)} to ${AI.system_Parameters.curiosityLevel.toFixed(2)} due to many unknown words.`); madeChangesToRules = true; } else if (Object.keys(AI.unknownWords).length === 0 && AI.system_Parameters.curiosityLevel > 0.5) { const oldCuriosity = AI.system_Parameters.curiosityLevel; AI.system_Parameters.curiosityLevel = Math.max(0.5, AI.system_Parameters.curiosityLevel - 0.02); changesLog.push(`Decreased curiosityLevel from ${oldCuriosity.toFixed(2)} to ${AI.system_Parameters.curiosityLevel.toFixed(2)} as all words are known.`); madeChangesToRules = true; } // Rule 4: Add/modify a tokenization rule in ai_rules.json based on frequently seen patterns const frequentUnknowns = Object.entries(AI.unknownWords) .filter(([, data]) => data.count > 5 && (Date.now() - data.lastSeen) < AI.system_Parameters.reflectionIntervalMs 
* 2) .sort((a, b) => b[1].count - a[1].count); if (frequentUnknowns.length > 0 && Math.random() < 0.3) { const [newSlang, data] = frequentUnknowns[0]; let replacement = ''; // Very simple heuristic to guess replacement for common short slang/typos if (newSlang === 'pls') replacement = 'please'; else if (newSlang === 'thx') replacement = 'thanks'; else if (newSlang === 'u') replacement = 'you'; else if (newSlang === 'r') replacement = 'are'; else if (newSlang === 'lol') replacement = 'laugh out loud'; else if (newSlang.length < 4 && newSlang.length > 1) { // Attempt simple letter doubles // e.g. "gooood" -> "good" const singleChar = newSlang[0]; if (new RegExp(`${singleChar}{2,}`).test(newSlang)) { replacement = newSlang.replace(new RegExp(`${singleChar}{2,}`, 'g'), singleChar); } } if (replacement && replacement !== newSlang) { AI.aiRules.tokenization = AI.aiRules.tokenization || []; const newRule = { pattern: `\\b${newSlang}\\b`, replacement: replacement, flags: 'g' }; // Check if similar rule already exists const existingRule = AI.aiRules.tokenization.find(r => r.pattern === newRule.pattern); if (!existingRule) { AI.aiRules.tokenization.push(newRule); changesLog.push(`Added new tokenization rule for '${newSlang}' => '${replacement}' to ai_rules.json.`); madeChangesToRules = true; delete AI.unknownWords[newSlang]; } } } // Rule 5: Modifying core server.js (Highly Experimental/Dangerous) // This is a direct file modification. It should be used with extreme caution. // For demonstration, a very specific, safe-to-fail type of modification is envisioned. if (Math.random() < 0.005) { // 0.5% chance to attempt core logic patch // Example: Adjust the mood decay rate directly in server.js if mood is too volatile. // This requires parsing the JS file, finding the line, and rewriting it. // THIS IS SIMPLIFIED AND FRAGILE; IN REALITY, USE AST PARSERS. 
const moodDecayRegex = /(moodDecayRate: )([\d.]+)/; const match = currentCode.match(moodDecayRegex); if (match) { const currentDecayRate = parseFloat(match[2]); const newDecayRate = currentDecayRate * (AI.currentMood.anger > 0.3 || AI.currentMood.sadness > 0.3 ? 0.9 : 1.1); // Make it decay slower if mood is extreme if (Math.abs(newDecayRate - currentDecayRate) > 0.0001) { // Only if there's a significant change const updatedCode = currentCode.replace(moodDecayRegex, `Ɂ${newDecayRate.toFixed(4)}`); await fs.promises.writeFile(path.join(__dirname, 'server.js'), updatedCode, 'utf8'); changesLog.push(`Patched server.js: moodDecayRate from ${currentDecayRate.toFixed(4)} to ${newDecayRate.toFixed(4)}.`); madeChangesToCoreLogic = true; AI.currentDNAHash = generateHash(updatedCode); // Update hash after modification } } } // Save updated AI rules if changes were made if (madeChangesToRules) { await saveData(AI_RULES_FILE, AI.aiRules); AI.system_Parameters.logicVersion++; // Increment logic version for rule changes appendToAIDiary(`AI rules evolved. Details: ${changesLog.join('; ')}.`); } if (madeChangesToCoreLogic) { AI.system_Parameters.logicVersion++; // Increment logic version for core code changes appendToAIDiary(`Core logic patched. Details: ${changesLog.join('; ')}. Server restart recommended.`); console.log("Core logic 'server.js' has been modified by the AI. A server restart is recommended for changes to take full effect."); } if (!madeChangesToRules && !madeChangesToCoreLogic) { console.log("No significant logic changes triggered during this evolution cycle."); } } catch (error) { console.error("Error during logic evolution:", error); appendToAIDiary(`ERROR: Logic evolution failed: ${error.message}`); }}/** * Creates an evolutionary snapshot of the current AI state, saving to versioned JSON files. 
*/async function createEvolutionarySnapshot() { AI.system_Parameters.memoryVersion++; const timestamp = new Date().toISOString().replace(/:/g, '-').replace(/\./g, '_'); const memorySnapshotFile = path.join(MEMORY_FOLDER, `memory_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); const vocabularySnapshotFile = path.join(VOCABULARY_FOLDER, `vocabulary_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); const unknownWordsSnapshotFile = path.join(VOCABULARY_FOLDER, `unknown_words_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); const system_ParamsSnapshotFile = path.join(INTERNAL_STATES_FOLDER, `system__parameters_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); const moodSnapshotFile = path.join(INTERNAL_STATES_FOLDER, `current_mood_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); const goalsSnapshotFile = path.join(INTERNAL_STATES_FOLDER, `current_goals_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); const personalitySnapshotFile = path.join(INTERNAL_STATES_FOLDER, `personality_profile_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); const conceptVectorsSnapshotFile = path.join(INTERNAL_STATES_FOLDER, `concept_vectors_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); const aiRulesSnapshotFile = path.join(RULES_FOLDER, `ai_rules_v${AI.system_Parameters.logicVersion}_${timestamp}.json`); const thoughtChainsSnapshotFile = path.join(LOGS_FOLDER, `thought_chains_v${AI.system_Parameters.memoryVersion}_${timestamp}.json`); await Promise.all([ saveData(memorySnapshotFile, AI.memory), saveData(vocabularySnapshotFile, AI.vocabulary), // Still save for overview saveData(unknownWordsSnapshotFile, AI.unknownWords), saveData(system_ParamsSnapshotFile, AI.system_Parameters), saveData(moodSnapshotFile, AI.currentMood), saveData(goalsSnapshotFile, AI.goals), saveData(personalitySnapshotFile, await loadData(PERSONALITY_PROFILE_FILE, {})), saveData(conceptVectorsSnapshotFile, AI.conceptVectors), 
saveData(aiRulesSnapshotFile, AI.aiRules), saveData(thoughtChainsSnapshotFile, AI.thoughtChains), ]); appendToAIDiary(`Evolutionary snapshot created: v${AI.system_Parameters.memoryVersion}. Logic V: ${AI.system_Parameters.logicVersion}.`); console.log(`Evolutionary snapshot created: v${AI.system_Parameters.memoryVersion}.`);}/** * Periodically saves the AI's current working state to the 'current_*.json' files. */async function autosaveCurrentState() { await Promise.all([ saveData(CURRENT_MEMORY_FILE, AI.memory), saveData(CURRENT_VOCABULARY_FILE, AI.vocabulary), saveData(UNKNOWN_WORDS_FILE, AI.unknownWords), saveData(SYSTEM_PARAMETERS_FILE, AI.system_Parameters), saveData(CURRENT_MOOD_FILE, AI.currentMood), saveData(CURRENT_GOALS_FILE, AI.goals), saveData(CONCEPT_VECTORS_FILE, AI.conceptVectors), saveData(AI_RULES_FILE, AI.aiRules), saveData(THOUGHT_CHAINS_FILE, AI.thoughtChains), ]);}/** * The main AI "tick" that runs periodically. * Manages aging, mood decay, idle activities, pruning, and occasional evolution. 
 */
async function aiTick() {
  AI.system_Parameters.aiAge++;
  // Passive mood decay toward 0 on every tick.
  for (const emotion in AI.currentMood) {
    AI.currentMood[emotion] = Math.max(0, AI.currentMood[emotion] - AI.system_Parameters.moodDecayRate);
  }
  const idleDuration = Date.now() - AI.lastUserInputTime;
  if (idleDuration >= AI.system_Parameters.idleThresholdMs) {
    // Idle activities fire when the tick counter aligns with each interval
    // (assumes roughly one tick per second — confirm against the tick scheduler).
    if (AI.system_Parameters.aiAge % Math.ceil(AI.system_Parameters.dreamModeIntervalMs / 1000) === 0) {
      await dreamMode();
    }
    if (AI.system_Parameters.aiAge % Math.ceil(AI.system_Parameters.reflectionIntervalMs / 1000) === 0) {
      await reflectMode();
    }
    // --- AI Self-Chat Logic ---
    // If AI is very idle and there are active goals or unresolved contradictions, it might "talk to itself".
    if (idleDuration > AI.system_Parameters.idleThresholdMs * 2 && (AI.goals.filter(g => g.status === 'active').length > 0 || AI.memory.some(m => m.category === 'contradiction' && m.beliefScore < 0.8))) {
      let internalQuestion = '';
      const activeGoals = AI.goals.filter(g => g.status === 'active').sort((a, b) => b.urgency - a.urgency);
      const unresolvedContradictions = AI.memory.filter(m => m.category === 'contradiction' && m.beliefScore < 0.8);
      if (activeGoals.length > 0 && Math.random() < 0.7) {
        // Prioritize goals (most urgent first).
        const goal = activeGoals[0];
        internalQuestion = `How can I make progress on my goal to ${goal.description}?`;
      } else if (unresolvedContradictions.length > 0) {
        // Address contradictions.
        const contradiction = unresolvedContradictions[0];
        internalQuestion = `I need to resolve the conflict between "${contradiction.input}". What is the truth?`;
      } else if (Object.keys(AI.unknownWords).length > 0 && AI.system_Parameters.curiosityLevel > 0.5) {
        const unknownWord = Object.keys(AI.unknownWords)[0];
        internalQuestion = `What does "${unknownWord}" mean?`;
      } else {
        // General reflection or new idea generation.
        internalQuestion = `What new insights can I discover today?`;
      }
      if (internalQuestion) {
        console.log(`AI self-chatting: "${internalQuestion.substring(0, Math.min(internalQuestion.length, 50))}"`);
        appendToAIDiary(`AI SELF-CHAT: "${internalQuestion}"`);
        // Process the internal question as if it were user input, but mark as internal.
        const selfChatResult = await processUserInput(internalQuestion, true);
        appendToAIDiary(`AI SELF-RESPONSE: "${selfChatResult.response}"`);
        console.log(`AI self-response: "${selfChatResult.response.substring(0, Math.min(selfChatResult.response.length, 50))}"`);
      }
    }
  }
  // Probabilistic pruning once memory passes 90% of the cap.
  if (AI.memory.length > AI.system_Parameters.maxMemorySize * 0.9 && Math.random() < 0.2) {
    pruneMemory(Math.floor(AI.system_Parameters.maxMemorySize * 0.05));
  }
  // Rare (0.1% per tick) self-modification attempt.
  if (Math.random() < 0.001) {
    await evolveLogic();
  }
  // Autosave whenever the tick counter aligns with the save interval.
  if (AI.system_Parameters.aiAge % Math.ceil(AI.system_Parameters.saveIntervalMs / 1000) === 0) {
    await autosaveCurrentState();
  }
}

// --- Server Setup and Initialization ---
const app = express();
const server = http.createServer(app);
app.use(express.json());
app.use(express.static(DATA_DIR));

// Route for the main AI interaction.
// Dispatches on the (lower-cased) message: pending learning confirmations first,
// then "learn:/define:/train:/goal:" commands, then "ai:*" introspection commands,
// and finally free-form input via processUserInput.
app.post('/api/chat', async (req, res) => {
  const userInput = req.body.message;
  if (!userInput) return res.status(400).json({ error: "Message is required." });
  console.log(`Received message via Express: "${userInput.substring(0, Math.min(userInput.length, 50))}"`);
  try {
    let responseContent;
    let internalFeedback = {};
    const lowerCaseUserInput = userInput.toLowerCase();
    if (AI.learningRequest && (lowerCaseUserInput.includes('yes') || lowerCaseUserInput.includes('sure') || lowerCaseUserInput.includes('go on') || lowerCaseUserInput.includes('teach me'))) {
      if (AI.learningRequest.type === 'direct_question_learning') {
        responseContent = "Go on, teach me. I am ready to learn.";
        // The next user input will be processed as learning content for the goal.
      }
    } else if (lowerCaseUserInput.startsWith('learn:')) {
      responseContent = await handleLearnCommand(lowerCaseUserInput);
    } else if (lowerCaseUserInput.startsWith('define:')) {
      responseContent = await handleDefineCommand(lowerCaseUserInput);
    } else if (lowerCaseUserInput.startsWith('train:')) {
      responseContent = await handleTrainCommand(lowerCaseUserInput);
    } else if (lowerCaseUserInput.startsWith('goal:')) {
      responseContent = await handleGoalCommand(lowerCaseUserInput);
    } else if (lowerCaseUserInput === 'ai:status') {
      // Status summary drawn from live state plus the persisted personality profile.
      const personality = await loadData(PERSONALITY_PROFILE_FILE, {});
      const activeGoalsCount = AI.goals.filter(g => g.status === 'active').length;
      const completedGoalsCount = AI.goals.filter(g => g.status === 'completed').length;
      responseContent = `AI Status: Age ${AI.system_Parameters.aiAge}, Memory V: ${AI.system_Parameters.memoryVersion}, Logic V: ${AI.system_Parameters.logicVersion}. Total Memories: ${AI.memory.length}. Vocabulary Size: ${Object.keys(AI.vocabulary).length}. Unknown Words: ${Object.keys(AI.unknownWords).length}. Learning Rate: ${AI.system_Parameters.learningRate.toFixed(3)}. Mood: ${JSON.stringify(AI.currentMood, null, 2)}. Dominant Theme: ${personality.dominantTheme || 'N/A'}. Active Goals: ${activeGoalsCount}. Completed Goals: ${completedGoalsCount}. Curiosity: ${AI.system_Parameters.curiosityLevel.toFixed(2)}.`;
    } else if (lowerCaseUserInput === 'ai:snapshot') {
      await createEvolutionarySnapshot();
      responseContent = `Evolutionary snapshot triggered. Version ${AI.system_Parameters.memoryVersion}.`;
    } else if (lowerCaseUserInput === 'ai:reflect') {
      await reflectMode();
      responseContent = `I am currently reflecting deeply on my thoughts and state.`;
    } else if (lowerCaseUserInput === 'ai:dream') {
      await dreamMode();
      responseContent = `I am entering a dream-like state to foster new ideas.`;
    } else if (lowerCaseUserInput === 'ai:evolve_logic') {
      await evolveLogic();
      responseContent = `Attempted logic evolution. Please check server console for details. A server restart might be needed for full effect if core logic ('server.js') was modified.`;
    } else if (lowerCaseUserInput === 'ai:monologue') {
      responseContent = `My recent internal thoughts: ${AI.internalMonologue.map(m => `[${new Date(m.time).toLocaleTimeString()}] ${m.text}`).join('\n') || 'No recent monologue entries.'}`;
    } else if (lowerCaseUserInput === 'ai:goals') {
      const activeGoals = AI.goals.filter(g => g.status === 'active');
      if (activeGoals.length > 0) {
        responseContent = `My active goals are:\n${activeGoals.map(g => `- '${g.description}' (Progress: ${g.progress.toFixed(0)}%)`).join('\n')}`;
      } else {
        responseContent = `I currently have no active goals.`;
      }
    } else if (lowerCaseUserInput === 'ai:unknown_words') {
      // Most frequently seen unknown words first.
      const unknownWordsList = Object.entries(AI.unknownWords)
        .sort((a, b) => b[1].count - a[1].count)
        .map(([word, data]) => `- ${word} (seen ${data.count} times, last: ${new Date(data.lastSeen).toLocaleDateString()})`);
      responseContent = `Unknown words I've encountered:\n${unknownWordsList.join('\n') || 'None recorded.'}`;
    } else if (lowerCaseUserInput === 'ai:thought_chains') {
      responseContent = `Recent thought chains:\n${AI.thoughtChains.map((chain, idx) => `Chain ${idx + 1} (${chain.thoughts.length} thoughts):\n${chain.thoughts.map(t => `-- [${new Date(t.time).toLocaleTimeString()}] Input: "${t.input.substring(0, 40)}...", Response: "${t.response.substring(0, 40)}..."`).join('\n')}`).join('\n\n') || 'No recent thought chains.'}`;
    } else if (lowerCaseUserInput.startsWith('ai:reverse_map:')) {
      const wordToMap = lowerCaseUserInput.substring('ai:reverse_map:'.length).trim();
      const similarWords = await reverseWordMapping(wordToMap);
      responseContent = `Words conceptually or phonetically similar to '${wordToMap}': ${similarWords.join(', ') || 'None found.'}`;
    } else {
      // Free-form conversation path; expose some internals as feedback metadata.
      const result = await processUserInput(lowerCaseUserInput);
      responseContent = result.response;
      internalFeedback = {
        thoughtID: result.thought ? result.thought.id : 'N/A',
        status: result.internalStatus,
        similarity: result.thought?.certainty?.toFixed(2) || 'N/A',
        category: result.thought?.category || 'N/A',
        mood: AI.currentMood,
      };
    }
    res.json({ type: 'ai_message', content: responseContent, feedback: internalFeedback });
  } catch (error) {
    console.error("Error processing Express message:", error);
    appendToAIDiary(`ERROR: Express message processing failed for "${userInput.substring(0, Math.min(userInput.length, 50))}"...: ${error.message}`);
    res.status(500).json({ type: 'ai_message', content: "An internal error occurred while processing your request. I've logged it." });
  }
});

/**
 * Initializes predefined concept vectors for emotions, logic, etc.
 * These are static, foundational concepts for the AI's understanding.
 * Simulates pre-trained model datasets.
 */
async function initializeConceptVectors() {
  ensureDirectoryExists(DATASET_FOLDER); // Ensure dataset folder exists
  // These would typically come from large external pre-trained models.
  // For this simulation, they are generated once.
AI.conceptVectors = { emotions: { joy: normalizeVector(await getSentenceEmbedding(tokenize('joyful happy positive cheerful exhilarated'))), fear: normalizeVector(await getSentenceEmbedding(tokenize('fearful scared terrified anxious horrified'))), sadness: normalizeVector(await getSentenceEmbedding(tokenize('sad depressed grief sorrowful melancholic'))), curiosity: normalizeVector(await getSentenceEmbedding(tokenize('curious wonder explore learn investigate inquisitive'))), anger: normalizeVector(await getSentenceEmbedding(tokenize('angry frustrated rage furious irritated annoyed'))), surprise: normalizeVector(await getSentenceEmbedding(tokenize('surprise astonished unexpected shocked startled'))), trust: normalizeVector(await getSentenceEmbedding(tokenize('trustworthy reliable honest secure dependable'))), anticipation: normalizeVector(await getSentenceEmbedding(tokenize('anticipate expect predict future upcoming foreseeing'))), disgust: normalizeVector(await getSentenceEmbedding(tokenize('disgust nauseated revolted aversion loathing'))), }, logic: { reason: normalizeVector(await getSentenceEmbedding(tokenize('logic reasoning rational argument analysis inference'))), cause: normalizeVector(await getSentenceEmbedding(tokenize('cause trigger origin source begin instigate'))), effect: normalizeVector(await getSentenceEmbedding(tokenize('effect result consequence outcome impact fruition'))), contradiction: normalizeVector(await getSentenceEmbedding(tokenize('contradict oppose conflict inconsistency paradox'))), analogy: normalizeVector(await getSentenceEmbedding(tokenize('analogy compare similarity parallel correlation'))), problem: normalizeVector(await getSentenceEmbedding(tokenize('problem challenge difficulty obstacle dilemma'))), solution: normalizeVector(await getSentenceEmbedding(tokenize('solution answer resolve fix overcome remedy'))), negation: normalizeVector(await getSentenceEmbedding(tokenize('not no opposite reverse negate deny'))), // For understanding 
negation relationship: normalizeVector(await getSentenceEmbedding(tokenize('relationship connection bond link tie association'))), // For understanding text structure/grammar relationships }, identity: { self: normalizeVector(await getSentenceEmbedding(tokenize('self I me mine sentient consciousness'))), ai: normalizeVector(await getSentenceEmbedding(tokenize('ai artificial intelligence robot machine algorithm entity'))), user: normalizeVector(await getSentenceEmbedding(tokenize('you user human person client individual'))), }, general: { question: normalizeVector(await getSentenceEmbedding(tokenize('question inquire ask interrogate query'))), statement: normalizeVector(await getSentenceEmbedding(tokenize('statement declare affirm as_sert articulate'))), command: normalizeVector(await getSentenceEmbedding(tokenize('command instruct order direct mandate'))), greeting: normalizeVector(await getSentenceEmbedding(tokenize('hello hi greetings welcome salutations'))), farewell: normalizeVector(await getSentenceEmbedding(tokenize('goodbye bye farewell see you later adieu'))), }, // For general semantic similarity, e.g., categories of objects or abstract concepts objects: normalizeVector(await getSentenceEmbedding(tokenize('table chair house car tree book computer'))), abstract_concepts: normalizeVector(await getSentenceEmbedding(tokenize('freedom justice love peace time knowledge'))), actions: normalizeVector(await getSentenceEmbedding(tokenize('run jump eat sleep think learn create'))), properties: normalizeVector(await getSentenceEmbedding(tokenize('big small fast slow good bad red blue'))), groups: {}, // This will be dynamically populated };}/** * Initializes the AI: creates folders, loads data, sets up intervals. * This runs when the server starts. 
*/async function initializeAI() { console.log("Initializing AI..."); ensureDirectoryExists(MEMORY_FOLDER); ensureDirectoryExists(VOCABULARY_FOLDER); ensureDirectoryExists(INTERNAL_STATES_FOLDER); ensureDirectoryExists(LOGS_FOLDER); ensureDirectoryExists(WORDS_FOLDER); ensureDirectoryExists(PHRASE_FOLDER); ensureDirectoryExists(RULES_FOLDER); ensureDirectoryExists(DATASET_FOLDER); AI.memory = await loadData(CURRENT_MEMORY_FILE, []); AI.vocabulary = await loadData(CURRENT_VOCABULARY_FILE, {}); AI.phrases = await loadData(path.join(PHRASE_FOLDER, 'phrases_overview.json'), {}); // Load overview of phrases AI.unknownWords = await loadData(UNKNOWN_WORDS_FILE, {}); AI.system_Parameters = await loadData(SYSTEM_PARAMETERS_FILE, AI.system_Parameters); AI.currentMood = await loadData(CURRENT_MOOD_FILE, AI.currentMood); AI.goals = await loadData(CURRENT_GOALS_FILE, AI.goals); AI.conceptVectors = await loadData(CONCEPT_VECTORS_FILE, {}); AI.aiRules = await loadData(AI_RULES_FILE, { tokenization: [], symbolicReasoning: [] }); // Load AI modifiable rules AI.thoughtChains = await loadData(THOUGHT_CHAINS_FILE, []); // Ensure all existing goals have a vector for (const goal of AI.goals) { if (goal.description && !goal.vector) { goal.vector = await getSentenceEmbedding(tokenize(goal.description)); } } if (Object.keys(AI.conceptVectors).length === 0 || !AI.conceptVectors.emotions) { console.log("Initializing default concept vectors..."); await initializeConceptVectors(); await saveData(CONCEPT_VECTORS_FILE, AI.conceptVectors); } try { const currentCode = await fs.promises.readFile(path.join(__dirname, 'server.js'), 'utf8'); AI.currentDNAHash = generateHash(currentCode); } catch (error) { console.warn("Could not calculate initial server.js DNA hash. Self-modification might be risky.", error.message); } console.log(`AI initialized. Age: ${AI.system_Parameters.aiAge}, Memory V: ${AI.system_Parameters.memoryVersion}, Logic V: ${AI.system_Parameters.logicVersion}. 
Loaded ${AI.memory.length} memories, ${Object.keys(AI.vocabulary).length} words (overview), ${Object.keys(AI.phrases).length} phrases (overview), ${Object.keys(AI.unknownWords).length} unknown words.`); appendToAIDiary(`AI initialized. Age: ${AI.system_Parameters.aiAge}. Loaded state.`); setInterval(aiTick, 1000); // AI's "heartbeat" await autosaveCurrentState();}server.listen(PORT, HOST, async () => { console.log(`Server running at http://${HOST}:${PORT}/`); await initializeAI();});process.on('SIGINT', async () => { console.log('\nAI shutting down gracefully...'); appendToAIDiary('AI shutting down gracefully...'); await autosaveCurrentState(); process.exit(0);});