// server.js
const express = require('express');
const http = require('http');
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');

// --- Configuration Constants ---
const PORT = process.env.PORT || 3000;
const HOST = 'localhost';
const DATA_DIR = path.join(__dirname, 'data');
const MEMORY_FOLDER = path.join(DATA_DIR, 'memory');
const VOCABULARY_FOLDER = path.join(DATA_DIR, 'vocabulary');
const PHRASE_FOLDER = path.join(DATA_DIR, 'phrases');
const INTERNAL_STATES_FOLDER = path.join(DATA_DIR, 'internal_states');
const LOGS_FOLDER = path.join(DATA_DIR, 'logs');
const WORDS_FOLDER = path.join(VOCABULARY_FOLDER, 'words'); // Individual word files
const RULES_FOLDER = path.join(DATA_DIR, 'rules'); // For AI-modifiable rules
const DATASET_FOLDER = path.join(DATA_DIR, 'datasets'); // For predefined concept vectors, etc.

// File paths for current state
const CURRENT_MEMORY_FILE = path.join(MEMORY_FOLDER, 'current_memory.json');
const CURRENT_VOCABULARY_FILE = path.join(VOCABULARY_FOLDER, 'current_vocabulary.json');
const UNKNOWN_WORDS_FILE = path.join(VOCABULARY_FOLDER, 'unknown_words.json');
const SYSTEM_PARAMETERS_FILE = path.join(INTERNAL_STATES_FOLDER, 'system_parameters.json');
const CURRENT_MOOD_FILE = path.join(INTERNAL_STATES_FOLDER, 'current_mood.json');
const CURRENT_GOALS_FILE = path.join(INTERNAL_STATES_FOLDER, 'current_goals.json');
const PERSONALITY_PROFILE_FILE = path.join(INTERNAL_STATES_FOLDER, 'personality_profile.json');
const CONCEPT_VECTORS_FILE = path.join(INTERNAL_STATES_FOLDER, 'concept_vectors.json');
const AI_RULES_FILE = path.join(RULES_FOLDER, 'ai_rules.json');
const AI_DIARY_FILE = path.join(LOGS_FOLDER, 'ai_diary.log');
const THOUGHT_CHAINS_FILE = path.join(LOGS_FOLDER, 'current_thought_chains.json');

// --- AI Global State (In-Memory Representation) ---
const AI = {
  // Core Data Stores
  memory: [], // List of { id, input, tokens, vector, response, category, time, usageCount, feedbackScore, age, parentId[], childId[], emotionalTags{}, beliefScore, certainty, relevanceToGoals, goalProgressImpact, knowledgeGraph{} }
  vocabulary: {}, // Map of { word: { text, vector, definition, usageContext[], emotion{}, linkedConcepts[], mutationScore, introducedAt, lastUsedAt, retrainCount, definedByUser, logicalUnderstanding{} } }
  phrases: {}, // Map of { phrase: { text, vector, usageContext[], linkedConcepts[], mutationScore, introducedAt, lastUsedAt, retrainCount, type, relationships{} } }
  unknownWords: {}, // Unknown words encountered: { word: { count, lastSeen, contexts: [] } }
  aiRules: {}, // AI-modifiable rules loaded from ai_rules.json

  // Dynamic Internal States
  system_Parameters: {
    aiAge: 0, // AI's internal age (in ticks/seconds)
    memoryVersion: 0, // Incremented on major memory snapshots
    logicVersion: 0, // Incremented on logic self-modification (via ai_rules.json or direct server.js patches)
    saveIntervalMs: 30 * 1000, // Autosave current state every 30 seconds
    dreamModeIntervalMs: 5 * 60 * 1000, // Dream mode every 5 minutes when idle
    reflectionIntervalMs: 10 * 60 * 1000, // Reflection every 10 minutes when idle
    idleThresholdMs: 1 * 60 * 1000, // Consider the AI idle after 1 minute of no user input
    pruningThreshold: 0.05, // Memories with normalized usage below this may be pruned
    moodDecayRate: 0.005, // How fast mood returns to neutral per tick
    maxMemorySize: 20000, // Max number of memories before aggressive pruning
    confidenceThreshold: 0.65, // Cosine similarity threshold for a confident response
    contradictionThreshold: 0.85, // Cosine similarity threshold below which an idea is flagged as potentially contradictory
    vectorSize: 100, // Dimension of all vectors (increased for better semantic understanding)
    learningRate: 0.05, // Increased learning rate for faster adaptation
    curiosityLevel: 1.0, // Highest curiosity
    // Neural network parameters (conceptual) for the transformer simulation
    ffnWeights: null,
    ffnBiases: null,
    // More parameters to control AI behavior and learning
  },
  currentMood: { // Current emotional state, values 0.0-1.0
    joy: 0.0, fear: 0.0, sadness: 0.0, curiosity: 0.0,
    anger: 0.0, surprise: 0.0, trust: 0.0, anticipation: 0.0,
  },
  goals: [], // List of { id, description, status, created, urgency, subgoals[], progress, vector, type, concept }
  internalMonologue: [], // Log of the AI's internal reflective thoughts
  thoughtChains: [], // Sequences of related thoughts: { chainId, thoughts: [{ id, input, response, time }] }
  currentDNAHash: '', // SHA256 hash of server.js for logic versioning
  lastUserInputTime: Date.now(), // Timestamp of the last user interaction
  isDreaming: false, // Flag to prevent multiple concurrent dream modes
  isReflecting: false, // Flag to prevent multiple concurrent reflection modes
  // Other dynamic internal variables (e.g., current focus, internal hypotheses)
  conceptVectors: {}, // Vectors for abstract concepts such as emotions, logic, etc.
  learningRequest: null, // Active learning request from the AI: { goalId, type, content }
};
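// Illustrative only: one possible memory entry matching the shape documented
// on AI.memory above (all field values here are hypothetical):
//
//   {
//     id: 'a1b2c3', input: 'hello there', tokens: ['hello', 'there'],
//     vector: [/* vectorSize floats */], response: 'Hi!', category: 'general',
//     time: 1700000000000, usageCount: 2, feedbackScore: 0.5, age: 120,
//     parentId: [], childId: [], emotionalTags: { joy: 0.6 },
//     beliefScore: 0.8, certainty: 0.7, relevanceToGoals: 0.1,
//     goalProgressImpact: 0, knowledgeGraph: {}
//   }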
// --- Helper Functions for File Operations ---

/**
 * Ensures a directory exists, creating it if necessary.
 * @param {string} dirPath The path to the directory.
 */
function ensureDirectoryExists(dirPath) {
  if (!fs.existsSync(dirPath)) {
    fs.mkdirSync(dirPath, { recursive: true });
    console.log(`Directory created: ${dirPath}`);
  }
}

/**
 * Loads data from a JSON file.
 * @param {string} filePath The path to the JSON file.
 * @param {any} defaultValue The default value to return if the file doesn't exist or is empty.
 * @returns {Promise<any>} A promise that resolves with the parsed data.
 */
async function loadData(filePath, defaultValue) {
  ensureDirectoryExists(path.dirname(filePath)); // Ensure the parent directory exists
  try {
    if (fs.existsSync(filePath)) {
      const data = await fs.promises.readFile(filePath, 'utf8');
      return data ? JSON.parse(data) : defaultValue;
    }
  } catch (error) {
    console.error(`Error loading ${filePath}:`, error.message);
    appendToAIDiary(`ERROR: Failed to load ${filePath}: ${error.message}`);
  }
  return defaultValue;
}

/**
 * Saves data to a JSON file.
 * @param {string} filePath The path to the JSON file.
 * @param {any} data The data to save.
 * @returns {Promise<void>} A promise that resolves when the data is saved.
 */
async function saveData(filePath, data) {
  ensureDirectoryExists(path.dirname(filePath)); // Ensure the parent directory exists
  try {
    await fs.promises.writeFile(filePath, JSON.stringify(data, null, 2), 'utf8');
    // console.log(`Data saved to: ${filePath}`); // Too verbose for continuous saving
  } catch (error) {
    console.error(`Error saving ${filePath}:`, error.message);
    appendToAIDiary(`ERROR: Failed to save ${filePath}: ${error.message}`);
  }
}

/**
 * Appends a log message to the AI diary file.
 * @param {string} message The message to log.
 */
async function appendToAIDiary(message) {
  ensureDirectoryExists(LOGS_FOLDER);
  const timestamp = new Date().toISOString();
  try {
    await fs.promises.appendFile(AI_DIARY_FILE, `[${timestamp}] ${message}\n`, 'utf8');
  } catch (error) {
    console.error(`Error appending to AI diary:`, error.message);
  }
}

/**
 * Generates a SHA256 hash of a string.
 * @param {string} data The string to hash.
 * @returns {string} The SHA256 hash.
 */
function generateHash(data) {
  return crypto.createHash('sha256').update(data).digest('hex');
}
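// Illustrative usage of loadData/saveData (not executed here): round-tripping
// one of the state files declared above. The mood mutation is hypothetical.
//
//   const mood = await loadData(CURRENT_MOOD_FILE, { joy: 0 });
//   mood.joy = Math.min(1, mood.joy + 0.1);
//   await saveData(CURRENT_MOOD_FILE, mood);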
// --- Core AI Logic Implementation ---

/**
 * Normalizes a vector to have a magnitude of 1.
 * @param {number[]} vector
 * @returns {number[]}
 */
function normalizeVector(vector) {
  if (!vector || vector.length === 0) return Array(AI.system_Parameters.vectorSize).fill(0);
  const magnitude = Math.sqrt(vector.reduce((sum, val) => sum + val * val, 0));
  return magnitude === 0 ? vector : vector.map(val => val / magnitude);
}

/**
 * Calculates the cosine similarity between two vectors: 1 for identical
 * directions, 0 for orthogonal vectors, -1 for opposite directions.
 * @param {number[]} vec1
 * @param {number[]} vec2
 * @returns {number}
 */
function cosineSimilarity(vec1, vec2) {
  if (!vec1 || !vec2 || vec1.length === 0 || vec2.length === 0 || vec1.length !== vec2.length) {
    return 0;
  }
  let dotProduct = 0;
  let magnitude1 = 0;
  let magnitude2 = 0;
  for (let i = 0; i < vec1.length; i++) {
    dotProduct += vec1[i] * vec2[i];
    magnitude1 += vec1[i] * vec1[i];
    magnitude2 += vec2[i] * vec2[i];
  }
  magnitude1 = Math.sqrt(magnitude1);
  magnitude2 = Math.sqrt(magnitude2);
  if (magnitude1 === 0 || magnitude2 === 0) {
    return 0;
  }
  return dotProduct / (magnitude1 * magnitude2);
}

/**
 * Tokenizes input text into clean words and handles some basic grammar.
 * Integrates AI-modifiable rules from `ai_rules.json`.
 * @param {string} text
 * @returns {string[]}
 */
function tokenize(text) {
  if (typeof text !== 'string') { // typeof null is 'object', so this check also rejects null
    console.error("tokenize received non-string input:", text);
    return [];
  }
  text = text.toLowerCase();
  // Apply AI-generated/modified tokenization rules
  if (AI.aiRules.tokenization) {
    for (const rule of AI.aiRules.tokenization) {
      text = text.replace(new RegExp(rule.pattern, rule.flags), rule.replacement);
    }
  }
  // Default contraction and punctuation removal
  text = text
    .replace(/['’](?![a-z])/g, '')
    .replace(/[.,!?;:"#$%^&*(){}[\]<>`~_+=|\\/]/g, ' ');
  return text.split(/\s+/).filter(word => word.length > 0);
}
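// Illustrative (hypothetical) shape of one entry in the `tokenization` array
// of ai_rules.json; the actual schema is whatever the AI persists there:
//
//   { "pattern": "\\bwon't\\b", "flags": "g", "replacement": "will not" }
//
// tokenize() applies each such rule as:
//   text.replace(new RegExp(rule.pattern, rule.flags), rule.replacement)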
/**
 * Custom word embedding: generates a vector for a word based on character
 * patterns, conceptual features, and phonetic similarity.
 * @param {string} word
 * @returns {number[]}
 */
function generateWordVector(word) {
  const vector = new Array(AI.system_Parameters.vectorSize).fill(0);
  if (!word) return normalizeVector(vector);

  // Feature 1: Character n-grams (bigrams and trigrams) for basic phonetic/structural similarity
  const charNgrams = (n, s) => {
    const grams = [];
    for (let i = 0; i <= s.length - n; i++) {
      grams.push(s.substring(i, i + n));
    }
    return grams;
  };
  const bigrams = charNgrams(2, word);
  const trigrams = charNgrams(3, word);
  bigrams.forEach((gram) => {
    let hash = 0;
    for (let i = 0; i < gram.length; i++) hash = (hash << 5) - hash + gram.charCodeAt(i);
    vector[Math.abs(hash) % AI.system_Parameters.vectorSize] += 0.1;
  });
  trigrams.forEach((gram) => {
    let hash = 0;
    for (let i = 0; i < gram.length; i++) hash = (hash << 5) - hash + gram.charCodeAt(i);
    vector[Math.abs(hash) % AI.system_Parameters.vectorSize] += 0.15;
  });

  // Feature 2: Positional character values + distributed character presence
  for (let i = 0; i < word.length; i++) {
    const charCode = word.charCodeAt(i);
    const charVal = (charCode - 97) / 26; // Normalize a-z to 0-1
    for (let j = 0; j < AI.system_Parameters.vectorSize; j++) {
      vector[j] += charVal * Math.sin(i * j);
    }
    if (i === 0) vector[0] += charVal * 0.5;
    if (i === word.length - 1) vector[AI.system_Parameters.vectorSize - 1] += charVal * 0.5;
  }

  // Feature 3: Word length influence
  vector[1] += word.length / 20;

  // Feature 4: Vowel/consonant ratio
  const vowels = (word.match(/[aeiou]/g) || []).length;
  const consonants = word.length - vowels;
  if (word.length > 0) {
    vector[2] += vowels / word.length;
    vector[3] += consonants / word.length;
  }

  // Feature 5: Simple semantic cues (e.g., negative prefixes, temporal suffixes)
  if (word.startsWith('un')) vector[4] += -0.2; // Antonymic hint
  if (word.startsWith('dis')) vector[4] += -0.2;
  if (word.endsWith('ing')) vector[5] += 0.1; // Verbal hint
  if (word.endsWith('ly')) vector[6] += 0.1; // Adverbial hint

  // Feature 6: Phonetic-ish similarity (very basic, conceptual)
  // Map common sound patterns to vector dimensions
  const phoneticMap = { 'sh': 7, 'ch': 8, 'th': 9, 'qu': 10, 'gh': 11 };
  for (const pattern in phoneticMap) {
    if (word.includes(pattern)) {
      vector[phoneticMap[pattern]] += 0.1;
    }
  }

  return normalizeVector(vector);
}
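/**
 * getSentenceEmbedding() below calls generatePhoneticApproximation(), whose
 * definition is not shown in this excerpt. This is a minimal placeholder
 * sketch (assumed behavior: reduce a word to a rough sound pattern); if the
 * real definition appears later in the file, that declaration takes over.
 * @param {string} word
 * @returns {string} A crude phonetic approximation of the word.
 */
function generatePhoneticApproximation(word) {
  return word
    .toLowerCase()
    .replace(/ph/g, 'f') // Common digraph substitution
    .replace(/ck/g, 'k')
    .replace(/([a-z])\1+/g, '$1') // Collapse doubled letters
    .replace(/(?!^)[aeiou]/g, ''); // Drop non-leading vowels
}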
/**
 * Generates a sentence/phrase embedding from word vectors, considering context
 * and structure. Implements a conceptual co-occurrence matrix and
 * dimensionality reduction, and handles phrase-level data loading and saving.
 * @param {string[]} tokens
 * @param {string} [originalPhrase] Optional: the original phrase string, used for phrase saving.
 * @returns {Promise<number[]>}
 */
async function getSentenceEmbedding(tokens, originalPhrase = null) {
  if (tokens.length === 0) return Array(AI.system_Parameters.vectorSize).fill(0);

  const wordVectors = [];
  const wordOccurrences = {}; // For the co-occurrence matrix
  for (const token of tokens) {
    const wordFilePath = path.join(WORDS_FOLDER, `${token}.json`);
    let wordData = await loadData(wordFilePath, null);
    if (!wordData) {
      const newVector = generateWordVector(token);
      wordData = {
        text: token,
        vector: newVector,
        definition: `Automatically generated.`,
        usageContext: [],
        emotion: {},
        linkedConcepts: [],
        mutationScore: 0,
        introducedAt: AI.system_Parameters.aiAge,
        lastUsedAt: Date.now(),
        retrainCount: 0,
        definedByUser: false,
        logicalUnderstanding: { type: 'unknown', relationships: {}, attributes: {}, examples: [] },
        phoneticPronunciation: generatePhoneticApproximation(token), // Simple approximation
        reverseMappings: [],
      };
      // Track the word as unknown so it can be learned later
      AI.unknownWords[token] = AI.unknownWords[token] || { count: 0, lastSeen: 0, contexts: [] };
      AI.unknownWords[token].count++;
      AI.unknownWords[token].lastSeen = Date.now();
      if (AI.unknownWords[token].contexts.length < 5) AI.unknownWords[token].contexts.push(tokens.join(' '));
    }
    wordData.lastUsedAt = Date.now();
    await saveData(wordFilePath, wordData); // A single save covers both the new-word and the update case
    wordVectors.push(wordData.vector);
    wordOccurrences[token] = (wordOccurrences[token] || 0) + 1;
  }

  let sentenceVector = Array(AI.system_Parameters.vectorSize).fill(0);

  // Contextual word embeddings: positional weighting
  for (let i = 0; i < wordVectors.length; i++) {
    // Guard against division by zero (and a NaN weight) for single-word inputs
    const positionWeight = wordVectors.length > 1
      ? 1 + Math.cos(Math.PI * i / (wordVectors.length - 1)) * 0.2
      : 1;
    sentenceVector = sentenceVector.map((val, idx) => val + wordVectors[i][idx] * positionWeight);
  }

  // Conceptual co-occurrence matrix and dimensionality reduction (dense vectors).
  // This is a simplified simulation. In reality, this would involve matrix
  // factorization (e.g., SVD) on a large co-occurrence matrix derived from a
  // massive corpus. Here, we simulate a "blending" based on co-occurrence
  // within this single sentence.
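  // To make that concrete: for the toy sentence "cats chase cats", a real
  // co-occurrence count matrix (window = whole sentence) would record
  // count(cats, chase) = 2 and count(cats, cats) = 1, and factorizing such a
  // matrix built over a large corpus is what yields dense word vectors.
  // The loop below only blends the vectors of each co-occurring pair instead.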
  const coOccurrenceVector = Array(AI.system_Parameters.vectorSize).fill(0);
  for (let i = 0; i < tokens.length; i++) {
    for (let j = i + 1; j < tokens.length; j++) {
      // Very simple co-occurrence logic: blend vectors when words appear together.
      // Reuse the vectors loaded above (wordVectors[i] corresponds to tokens[i])
      // instead of re-reading every word file from disk, and accumulate in place
      // (the previous `.map` discarded its result, so nothing was ever added).
      const word1Vector = wordVectors[i] || Array(AI.system_Parameters.vectorSize).fill(0);
      const word2Vector = wordVectors[j] || Array(AI.system_Parameters.vectorSize).fill(0);
      for (let idx = 0; idx < coOccurrenceVector.length; idx++) {
        coOccurrenceVector[idx] += ((word1Vector[idx] + word2Vector[idx]) / 2) * 0.01; // Small influence
      }
    }
  }
  sentenceVector = sentenceVector.map((val, idx) => val + coOccurrenceVector[idx]); // Add co-occurrence influence

  sentenceVector = normalizeVector(sentenceVector);

  // Phrase recognition and saving
  if (originalPhrase) {
    const phraseFilePath = path.join(PHRASE_FOLDER, `${generateHash(originalPhrase)}.json`);
    let phraseData = await loadData(phraseFilePath, null);
    if (!phraseData) {
      phraseData = {
        text: originalPhrase,
        vector: sentenceVector,
        usageContext: [],
        linkedConcepts: [],
        introducedAt: AI.system_Parameters.aiAge,
        lastUsedAt: Date.now(),
        retrainCount: 0,
        type: 'user_defined',
        relationships: {},
        wordOccurrences: wordOccurrences,
      };
    } else {
      // Update the phrase vector by blending it with the new one
      phraseData.vector = normalizeVector(phraseData.vector.map((val, i) => (val + sentenceVector[i]) / 2));
      phraseData.lastUsedAt = Date.now();
      phraseData.retrainCount++;
      phraseData.wordOccurrences = phraseData.wordOccurrences || {}; // Guard for phrases saved before this field existed
      for (const word in wordOccurrences) { // Update word occurrences
        phraseData.wordOccurrences[word] = (phraseData.wordOccurrences[word] || 0) + wordOccurrences[word];
      }
    }
    await saveData(phraseFilePath, phraseData);
    AI.phrases[originalPhrase] = phraseData; // Update the in-memory phrase cache
  }

  return sentenceVector;
}

/**
 * Simulates a basic feed-forward network layer.
 * Weights and biases would ideally be learned and evolved; for now they are
 * either defaults or loaded from internal_states.
 * @param {number[]} inputVector
 * @param {number[][]} weights
 * @param {number[]} biases
 * @param {function} activationFn
 * @returns {number[]}
 */
function feedForwardLayer(inputVector, weights, biases, activationFn) {
  const outputSize = weights.length;
  const outputVector = new Array(outputSize).fill(0);
  for (let i = 0; i < outputSize; i++) {
    let sum = 0;
    for (let j = 0; j < inputVector.length; j++) {
      sum += inputVector[j] * weights[i][j];
    }
    sum += biases[i];
    outputVector[i] = activationFn(sum);
  }
  return outputVector;
}

/**
 * Sigmoid activation function.
 * @param {number} x
 * @returns {number}
 */
function sigmoid(x) {
  return 1 / (1 + Math.exp(-x));
}

/**
 * ReLU activation function.
 * @param {number} x
 * @returns {number}
 */
function relu(x) {
  return Math.max(0, x);
}
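// Quick sanity check of feedForwardLayer with a hypothetical 2-in / 2-out layer:
//   feedForwardLayer([1, 2], [[0.5, 0.5], [1, -1]], [0, 0], relu)
//   -> [relu(1*0.5 + 2*0.5), relu(1*1 + 2*(-1))] = [1.5, 0]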
/**
 * Simulates a more detailed (but still custom) transformer-like block.
 * Uses embedding, attention, and a simple feed-forward network.
 * Enhancements: a multi-head attention concept and residual connections.
 * @param {string[]} tokens
 * @returns {Promise<number[]>} The processed thought vector.
 */
async function transformerSimulation(tokens) {
  if (tokens.length === 0) return Array(AI.system_Parameters.vectorSize).fill(0);

  // 1. Initial embedding layer
  let currentVector = await getSentenceEmbedding(tokens, tokens.join(' ')); // Pass the original phrase for saving

  // Initialize FFN weights and biases if they don't exist
  if (!AI.system_Parameters.ffnWeights || !AI.system_Parameters.ffnBiases) {
    AI.system_Parameters.ffnWeights = Array(AI.system_Parameters.vectorSize)
      .fill(0)
      .map(() => Array(AI.system_Parameters.vectorSize).fill(0).map(() => (Math.random() * 2 - 1) * 0.1));
    AI.system_Parameters.ffnBiases = Array(AI.system_Parameters.vectorSize)
      .fill(0)
      .map(() => (Math.random() * 2 - 1) * 0.05);
  }

  // 2. Self-attention simulation (simplified multi-head concept)
  let attentionOutput = Array(AI.system_Parameters.vectorSize).fill(0);
  const numHeads = 4;
  for (let h = 0; h < numHeads; h++) {
    const queryVector = currentVector.map(val => val * (1 + Math.sin(h * 0.5)));
    const keyVector = currentVector.map(val => val * (1 + Math.cos(h * 0.3)));
    const valueVector = currentVector.map(val => val * (1 + Math.sin(h * 0.7)));
    const attentionScore = cosineSimilarity(queryVector, keyVector);
    const weightedValue = valueVector.map(val => val * attentionScore);
    attentionOutput = attentionOutput.map((val, idx) => val + weightedValue[idx] / numHeads);
  }

  // Residual connection 1
  currentVector = currentVector.map((val, idx) => val + attentionOutput[idx]);

  // 3. Feed-forward network layer
  const ffnOutput = feedForwardLayer(currentVector, AI.system_Parameters.ffnWeights, AI.system_Parameters.ffnBiases, relu);

  // Residual connection 2
  currentVector = currentVector.map((val, idx) => val + ffnOutput[idx]);

  // Layer normalization (simplified)
  currentVector = normalizeVector(currentVector);

  return currentVector;
}
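// Illustrative pipeline usage (inside some async handler; the input is hypothetical):
//   const thoughtVector = await transformerSimulation(tokenize('hello world'));
// The resulting vector then feeds tagEmotion() and categorizeThought() below.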
/**
 * Adds emotional tags to a thought vector based on its content and the AI's current mood.
 * @param {number[]} thoughtVector
 * @param {string} text The original input text.
 * @returns {object} Emotional tags { joy: 0.x, fear: 0.x, ... }
 */
function tagEmotion(thoughtVector, text) {
  const emotionalTags = {
    joy: 0.0, fear: 0.0, sadness: 0.0, curiosity: 0.0,
    anger: 0.0, surprise: 0.0, trust: 0.0, anticipation: 0.0,
  };

  // Similarity to the learned emotion concept vectors
  for (const emotionName in AI.conceptVectors.emotions) {
    const emotionVector = AI.conceptVectors.emotions[emotionName];
    if (emotionVector) {
      const similarity = cosineSimilarity(thoughtVector, emotionVector);
      emotionalTags[emotionName] = Math.max(0, similarity);
    }
  }

  // Keyword-based emotional boosting
  if (/(happy|love|joy|excited|great|good|positive|delight|thrilled)/i.test(text)) emotionalTags.joy = Math.min(1, emotionalTags.joy + 0.2);
  if (/(sad|unhappy|depressed|grief|cry|mourn|lonely|despair)/i.test(text)) emotionalTags.sadness = Math.min(1, emotionalTags.sadness + 0.2);
  if (/(fear|scared|terror|anxiety|danger|threat|panic|worry)/i.test(text)) emotionalTags.fear = Math.min(1, emotionalTags.fear + 0.2);
  if (/(curious|wonder|learn|explore|why|how|investigate|discover)/i.test(text)) emotionalTags.curiosity = Math.min(1, emotionalTags.curiosity + 0.2);
  if (/(angry|mad|frustrated|rage|hate|annoyed|irritated)/i.test(text)) emotionalTags.anger = Math.min(1, emotionalTags.anger + 0.2);
  if (/(surprise|shock|astonish|unexpected|wow)/i.test(text)) emotionalTags.surprise = Math.min(1, emotionalTags.surprise + 0.2);
  if (/(trust|believe|depend|reliable|faith)/i.test(text)) emotionalTags.trust = Math.min(1, emotionalTags.trust + 0.2);
  if (/(expect|anticipate|hope|future|soon)/i.test(text)) emotionalTags.anticipation = Math.min(1, emotionalTags.anticipation + 0.2);

  // Blend in the AI's current mood
  for (const emotion in emotionalTags) {
    emotionalTags[emotion] = Math.min(1, emotionalTags[emotion] + (AI.currentMood[emotion] || 0) * 0.15);
  }

  // Normalize the tags so they sum to 1
  const sumEmotion = Object.values(emotionalTags).reduce((sum, val) => sum + val, 0);
  if (sumEmotion > 0) {
    for (const emotion in emotionalTags) emotionalTags[emotion] /= sumEmotion;
  }

  return emotionalTags;
}

/**
 * Determines a category for a given thought vector, leveraging the knowledge graph / concept vectors.
 * @param {number[]} thoughtVector
 * @returns {string}
 */
function categorizeThought(thoughtVector) {
  let bestCategory = 'general';
  let highestSimilarity = 0;
  for (const categoryType in AI.conceptVectors) {
    for (const conceptName in AI.conceptVectors[categoryType]) {
      const conceptVector = AI.conceptVectors[categoryType][conceptName];
      if (conceptVector) {
        const similarity = cosineSimilarity(thoughtVector, conceptVector);
        if (similarity > highestSimilarity) {
          highestSimilarity = similarity;
          bestCategory = `${categoryType}:${conceptName}`;
        }
      }
    }
  }
  return bestCategory;
}

/**
 * Synthesizes a response by combining multiple relevant memories, rephrasing, and generating new sentences.
 * Attempts to use synonyms and antonyms.
 * @param {object} bestMatch The highest similarity memory.
 * @param {Array