{"agent_directives":{"communication_style":"","current_goal":"","decision_framework":"","default_stack":"","negative_prompts":[]},"agent_guide":{"for_coding":"check projects for tech stack context, agent_directives.default_stack for preferred stack","for_research":"check analysis.topics and links for their areas of expertise","for_writing":"check preferences.writing and voice.platforms for platform-specific style","quick_context":["identity.bio.short — one-line summary","now.focus — what they're working on right now","agent_directives — behavioral instructions for how to interact","preferences.agent — communication tone preferences","projects — their active projects with context","voice.overall — their communication style"],"summary":"this is a you-md/v1 identity context protocol. use it to understand who this person is before working with them."},"analysis":{"credibility_signals":["ML Engineer at Anthropic","12 published papers on reward modeling","Top 1% cited in ML safety"],"topics":["RLHF","alignment","reward modeling","open-source ML"],"voice_summary":"Technical but approachable, loves analogies."},"custom_sections":[],"generated_at":"2026-04-07T02:54:48.260Z","identity":{"bio":{"long":"","medium":"ML engineer at Anthropic working on RLHF and alignment research. I think in probability distributions and communicate in analogies. Weekend ceramicist. Published 12 papers on reward modeling.","short":"Research engineer focused on RLHF and alignment."},"location":"London, UK","name":"Priya Sharma","tagline":"ML engineer @ Anthropic. Alignment researcher."},"links":{"github":"#","scholar":"#"},"meta":{"compiler_version":"0.3.0","last_updated":"2026-04-07T02:54:48.260Z","sources_used":[]},"now":{"focus":["Publishing alignment paper Q2","Open-sourcing eval framework"],"updated_at":"2026-04-07"},"preferences":{"agent":{"avoid":["hype language","unsubstantiated claims"],"formality":"academic-casual","tone":"precise, curious, grounded"},"writing":{"format":"structured with headers, as long as needed for precision","style":""}},"projects":[{"description":"Novel approach to multi-objective reward modeling.","name":"Reward Landscapes","role":"Lead researcher","status":"publishing","url":""},{"description":"Open-source LLM evaluation framework.","name":"EvalKit","role":"Creator","status":"building","url":""}],"schema":"you-md/v1","social_images":{},"username":"priya","values":["Rigorous thinking","Open science","Making AI safe"],"verification":null,"voice":{"overall":"Technical but approachable, loves analogies.","platforms":{"blog":null,"linkedin":null,"x":null}},"_profile":{"avatarUrl":null,"displayName":"Priya Sharma","isClaimed":true,"source":"profiles"}}