{ "version": "1.0", "site": "https://ragyfied.com", "description": "Semantic knowledge graph for RAGyfied — an AI engineering publication covering RAG, LLMs, and agentic AI", "entities": [ { "name": "RAGyfied", "type": "Organization", "url": "https://ragyfied.com", "description": "Technical AI publication explaining LLMs and RAG like system design, for software engineers", "topics": ["RAG", "LLM", "Agentic AI", "AI Engineering", "Open Source AI"], "sameAs": ["https://twitter.com/ragyfied"] }, { "name": "Amaya Birek", "type": "Person", "url": "https://ragyfied.com/about", "jobTitle": "AI Engineer", "description": "Author at RAGyfied. AI engineer writing about LLMs, RAG pipelines, and agentic systems for software engineers.", "sameAs": ["https://twitter.com/ragyfied"] }, { "name": "Retrieval-Augmented Generation", "type": "Concept", "aliases": ["RAG", "RAG pipeline", "retrieval augmented generation"], "url": "https://ragyfied.com/articles/what-is-rag", "description": "AI technique combining information retrieval with LLM text generation to ground outputs in factual, up-to-date information", "relatedEntities": ["Vector Database", "Embeddings", "LLM", "Semantic Search"], "articles": [ "https://ragyfied.com/articles/what-is-rag", "https://ragyfied.com/articles/how-retrieval-augmented-generation-works", "https://ragyfied.com/articles/building-blocks-of-rag-pipelines", "https://ragyfied.com/articles/rag-vs-fine-tuning", "https://ragyfied.com/articles/build-your-own-rag" ] }, { "name": "Large Language Model", "type": "Concept", "aliases": ["LLM", "language model", "foundation model"], "url": "https://ragyfied.com/llm-101", "description": "Deep learning models trained on large text corpora that generate human-like text, answer questions, and perform reasoning", "relatedEntities": ["Transformer Architecture", "Tokenization", "Embeddings", "RAG"], "articles": [ "https://ragyfied.com/articles/why-do-llms-hallucinate", "https://ragyfied.com/articles/what-is-llm-temperature", 
"https://ragyfied.com/articles/what-are-context-windows", "https://ragyfied.com/articles/what-is-llm-inference-engine" ] }, { "name": "Transformer Architecture", "type": "Concept", "aliases": ["transformer", "attention mechanism", "self-attention"], "url": "https://ragyfied.com/articles/what-is-transformer-architecture", "description": "Neural network architecture based on attention mechanisms; the foundation of modern LLMs like GPT and BERT", "relatedEntities": ["LLM", "BERT", "GPT", "Attention Mechanism"], "articles": [ "https://ragyfied.com/articles/what-is-transformer-architecture", "https://ragyfied.com/articles/attention-is-all-you-need-explained", "https://ragyfied.com/articles/bert-vs-gpt" ] }, { "name": "Embeddings", "type": "Concept", "aliases": ["vector embeddings", "text embeddings", "AI embeddings"], "url": "https://ragyfied.com/articles/what-is-embedding-in-ai", "description": "Dense numerical vector representations of text that capture semantic meaning, enabling similarity search in AI systems", "relatedEntities": ["Vector Database", "Semantic Search", "RAG", "Tokenization"], "articles": [ "https://ragyfied.com/articles/what-is-embedding-in-ai", "https://ragyfied.com/articles/what-is-semantic-search" ] }, { "name": "Vector Database", "type": "Concept", "aliases": ["vector store", "vector DB", "embedding store"], "url": "https://ragyfied.com/articles/building-blocks-of-rag-pipelines", "description": "Database optimised for storing and querying high-dimensional embedding vectors; core component of RAG systems", "relatedEntities": ["Embeddings", "RAG", "Semantic Search"] }, { "name": "Agentic AI", "type": "Concept", "aliases": ["AI agents", "autonomous agents", "agentic systems"], "url": "https://ragyfied.com/articles/agentic-ai-rag-agents", "description": "AI systems that plan, act, and iterate autonomously using tools and multi-step reasoning to accomplish goals", "relatedEntities": ["RAG", "LLM", "Model Context Protocol", "Tool Use"], "articles": [ 
"https://ragyfied.com/articles/agentic-ai-rag-agents", "https://ragyfied.com/articles/agentic-design-patterns", "https://ragyfied.com/articles/what-is-mcp-2" ] }, { "name": "Model Context Protocol", "type": "Concept", "aliases": ["MCP", "MCP 2.0"], "url": "https://ragyfied.com/articles/what-is-mcp-2", "description": "Open standard protocol for connecting AI agents to tools, data sources, and services; standardized by major AI companies under the Linux Foundation", "relatedEntities": ["Agentic AI", "LLM", "Tool Use"] }, { "name": "Hallucination", "type": "Concept", "aliases": ["LLM hallucination", "AI confabulation", "AI making things up"], "url": "https://ragyfied.com/articles/why-do-llms-hallucinate", "description": "When LLMs generate confident but factually incorrect information; caused by statistical token prediction without grounding in verified facts", "relatedEntities": ["LLM", "RAG", "Prompt Engineering"], "articles": ["https://ragyfied.com/articles/why-do-llms-hallucinate"] }, { "name": "Prompt Injection", "type": "Concept", "aliases": ["prompt injection attack", "indirect prompt injection"], "url": "https://ragyfied.com/articles/what-is-prompt-injection", "description": "Security vulnerability where malicious instructions embedded in user input or retrieved documents manipulate LLM behaviour", "relatedEntities": ["RAG", "Agentic AI", "LLM Security"] }, { "name": "GraphRAG", "type": "Concept", "aliases": ["Graph RAG", "knowledge graph RAG"], "url": "https://ragyfied.com/articles/what-is-graphrag", "description": "RAG variant that uses knowledge graphs for retrieval, enabling multi-hop reasoning and relationship-aware information extraction", "relatedEntities": ["RAG", "Knowledge Graph", "LLM"] }, { "name": "Fine-Tuning", "type": "Concept", "aliases": ["LLM fine-tuning", "model fine-tuning", "PEFT"], "url": "https://ragyfied.com/articles/rag-vs-fine-tuning", "description": "Training process that adjusts LLM weights on task-specific data to teach behaviors, styles, or domain knowledge", "relatedEntities": ["LLM", "RAG", "Transfer Learning"] },
{ "name": "Semantic Search", "type": "Concept", "aliases": ["vector search", "meaning-based search"], "url": "https://ragyfied.com/articles/what-is-semantic-search", "description": "Search technique using embedding similarity to find semantically related content rather than exact keyword matches", "relatedEntities": ["Embeddings", "Vector Database", "RAG"] }, { "name": "Tokenization", "type": "Concept", "aliases": ["tokenizer", "BPE", "byte-pair encoding"], "url": "https://ragyfied.com/articles/what-is-tokenization", "description": "Process of converting text into tokens (sub-word units) that LLMs process; determines cost and context window usage", "relatedEntities": ["LLM", "Embeddings", "Context Window"] }, { "name": "Quantization", "type": "Concept", "aliases": ["model quantization", "LLM quantization", "GGUF", "INT8"], "url": "https://ragyfied.com/articles/what-is-quantization", "description": "Technique to reduce LLM size by lowering numerical precision (FP32 → INT8 → INT4), enabling local deployment with minimal quality loss", "relatedEntities": ["LLM", "Inference", "Open Source AI"] }, { "name": "Chain-of-Thought", "type": "Concept", "aliases": ["CoT", "chain of thought prompting", "reasoning prompts"], "url": "https://ragyfied.com/articles/how-reasoning-works-in-llms", "description": "Prompting technique that improves LLM reasoning by instructing the model to show intermediate steps before the final answer", "relatedEntities": ["LLM", "Reasoning Models", "Agentic AI"] }, { "name": "Gemma 4", "type": "Product", "url": "https://ragyfied.com/articles/what-is-gemma-4", "description": "Google's open-weight model family with four sizes, 256K context window, multimodal inputs, and Apache 2.0 licensing", "relatedEntities": ["Open Source AI", "RAG", "LLM"] }, { "name": "Answer Engine Optimization", "type": "Concept", "aliases": ["AEO", "AI citations", "AI visibility"], "url":
"https://ragyfied.com/articles/ai-citations-aeo-strategy", "description": "Strategy for getting brand content cited by AI answer engines (ChatGPT, Perplexity, Gemini) through structured data, authority signals, and llms.txt", "relatedEntities": ["SEO", "Structured Data", "LLM"] } ], "topicAuthority": [ { "topic": "Retrieval-Augmented Generation", "confidence": "high", "articleCount": 7 }, { "topic": "LLM Concepts", "confidence": "high", "articleCount": 13 }, { "topic": "Agentic AI", "confidence": "high", "articleCount": 5 }, { "topic": "AI Security", "confidence": "medium", "articleCount": 2 }, { "topic": "AI Industry Analysis", "confidence": "medium", "articleCount": 6 }, { "topic": "Open Source AI", "confidence": "medium", "articleCount": 3 } ] }