[{"data":1,"prerenderedAt":376},["ShallowReactive",2],{"$fgukOamtKU1RtUiMFsqdObttmqPPQz0uc7bl_gj_LyX0":3,"$fwCc4ktq6NcfbgyBJ0qdgBMqhShLx-P74cy6AZCP76i4":245,"article-25":375},{"code":4,"msg":5,"data":6},0,"",{"category":7,"tag":11,"popular":19,"latest":86,"banner":126,"list":151,"cache":244},[8,9,10],"Agent","OpenAI","LLM",[8,12,13,14,9,10,15,16,17,18],"Google","Nvidia","Claude","DeepSeek","OCR","Chat","Generator",[20,29,37,45,54,62,70,79],{"id":21,"publish_date":22,"is_original":23,"collection":5,"cover_url":24,"cover_url_1_1":25,"title":26,"summary":27,"author":28},411,"2023-09-10",1,"article_res/cover/451ef50c225a8dc61c4336506794d13b.jpeg","article_res/cover/3ba9dc7a72f87d40b20fc2d225289ee3.jpeg","Idealism","Reality is created by the mind, we can change our reality by changing our mind. - Plato","Renee's Entrepreneurial Journey",{"id":30,"publish_date":31,"is_original":23,"collection":32,"cover_url":33,"cover_url_1_1":34,"title":35,"summary":36,"author":28},108,"2024-12-07","#LLM #AGI #AI Agent","article_res/cover/0039044422e4ec9f61c18e8ee1693bb0.jpeg","article_res/cover/4220971b108a91d21407d87bb02fbaa6.jpeg","Freysa.ai: The World's First Adversarial AI Agent Game","说服 Freysa 把钱包里的钱都拿出来",{"id":38,"publish_date":39,"is_original":23,"collection":40,"cover_url":41,"cover_url_1_1":42,"title":43,"summary":44,"author":28},12,"2025-03-09","#Oxford #Reasoning #LLM #Tool Use","article_res/cover/d448e9b3617a0b5302e1bd10c438bca9.jpeg","article_res/cover/864a468f9cc4c9317efadb3811909888.jpeg","Agentic Reasoning Framework - Significantly enhance the reasoning ability of LLMs through the integration of external tools using agents","Agentic Reasoning: Reasoning LLMs with Tools for Deep Research",{"id":46,"publish_date":47,"is_original":4,"collection":48,"cover_url":49,"cover_url_1_1":50,"title":51,"summary":52,"author":53},480,"2023-04-14","#Stable Diffusion","article_res/cover/0bdbe7cb1de4a78e54536e5d9afa7ec9.jpeg","article_res/cover/b3d6ffec0608dcfaf18c5a69906d1490.jpeg","【AIGC Learning】Generate Prompts Using Word Graphs - Stable Diffusion Web UI Series 13","AI will become a powerful tool in education, transforming the way we learn and deliver instruction.  \n- Reid Hoffman","--",{"id":55,"publish_date":56,"is_original":4,"collection":57,"cover_url":58,"cover_url_1_1":59,"title":60,"summary":61,"author":28},413,"2023-09-08","#Neuroscience","article_res/cover/74f8302d78a23d9430f22171eae136b6.jpeg","article_res/cover/87ca08af81bb304746be5261160964c0.jpeg","Can machines be conscious?","Do we have an ethical obligation to not turn off conscious machines? Would turning them off be murder? No. I don't lose any sleep over unplugging a conscious machine.\n- Jeff Hawkins, \"A Thousand Brains\"",{"id":63,"publish_date":64,"is_original":23,"collection":65,"cover_url":66,"cover_url_1_1":67,"title":68,"summary":69,"author":28},178,"2024-09-09","#Entrepreneurship","article_res/cover/a7224f025b55d1820408085faef63079.jpeg","article_res/cover/11a9995b096cbf64465ef01b8673b154.jpeg","37signals company","This damn sense of relaxation",{"id":71,"publish_date":72,"is_original":4,"collection":73,"cover_url":74,"cover_url_1_1":75,"title":76,"summary":77,"author":78},460,"2023-05-12","#Google","article_res/cover/b970687b12faa52da976f91248c2aa7b.jpeg","article_res/cover/d1e71b52cfd2c63bc6e71f3e85ff135c.jpeg","Learn what BRC-20 and Ordinals are using Google Bard","Ordinals - a new protocol that allows users to store arbitrary data on the Bitcoin blockchain","Google Bard mainly writes",{"id":80,"publish_date":81,"is_original":23,"collection":5,"cover_url":82,"cover_url_1_1":83,"title":84,"summary":85,"author":28},309,"2024-03-26","article_res/cover/9877f95894ee88532d0e6012c23a2df3.jpeg","article_res/cover/20092164ddc109ce6ae56b1984246751.jpeg","Learning the Cancun Upgrade with lepton and perplexity","Building a quick conversation-based search demo with Lepton AI.",[87,95,103,111,119],{"id":88,"publish_date":89,"is_original":23,"collection":90,"cover_url":91,"cover_url_1_1":92,"title":93,"summary":94,"author":28},627,"2025-03-20","#AI Avatar #AI Video Generation","article_res/cover/d95481358f73924989f8c4ee9c75d1c8.jpeg","article_res/cover/b74bc0fab01f8b6a6aa87696c0c3ed8b.jpeg","DisPose: Generating Animated Videos by Driving Video with Reference Images","DisPose is a controllable human image animation method that enhances video generation.",{"id":96,"publish_date":97,"is_original":23,"collection":98,"cover_url":99,"cover_url_1_1":100,"title":101,"summary":102,"author":28},626,"2025-03-21","#Deep Dive into LLMs #LLM #RL #Andrej Karpathy #AlphaGo","article_res/cover/446553a5c8f8f2f07d97b20eaee84e56.jpeg","article_res/cover/e6c2823409c9b34624064b9acbaca6f1.jpeg","AlphaGo and the Power of Reinforcement Learning - Andrej Karpathy's Deep Dive on LLMs (Part 9)","Simply learning from humans will never surpass human capabilities.",{"id":104,"publish_date":105,"is_original":23,"collection":106,"cover_url":107,"cover_url_1_1":108,"title":109,"summary":110,"author":28},625,"2025-03-22","#Deep Dive into LLMs #LLM #RL #RLHF #Andrej Karpathy","article_res/cover/8da81d38b1e5cf558a164710fd8a5389.jpeg","article_res/cover/96f028d76c362a99a0dd56389e8f7a9b.jpeg","Reinforcement Learning from Human Feedback (RLHF) - Andrej Karpathy's Deep Dive on LLMs (Part 10)","Fine-Tuning Language Models from Human Preferences",{"id":112,"publish_date":113,"is_original":23,"collection":114,"cover_url":115,"cover_url_1_1":116,"title":117,"summary":118,"author":28},624,"2025-03-23","#Deep Dive into LLMs #LLM #Andrej Karpathy #AI Agent #MMM","article_res/cover/a5e7c3d48bb09109684d6513287c661d.jpeg","article_res/cover/d3f22b7c0ab8d82fd2da457a299e0773.jpeg","The Future of Large Language Models - Andrej Karpathy's In-Depth Explanation of LLM (Part 11)","preview of things to come",{"id":120,"publish_date":113,"is_original":23,"collection":121,"cover_url":122,"cover_url_1_1":123,"title":124,"summary":125,"author":28},623,"#Google #Voe #AI Video Generation","article_res/cover/c44062fea0f336c2b96b3928292392c2.jpeg","article_res/cover/a041041c69092ad3db191c5bf3ff981b.jpeg","Trial of Google's video generation model VOE2","Our state-of-the-art video generation model",[127,135,143],{"id":128,"publish_date":129,"is_original":23,"collection":130,"cover_url":131,"cover_url_1_1":132,"title":133,"summary":134,"author":28},300,"2024-04-16","#AI in Science #AGI","article_res/cover/6bf01e793e0f33e848572412eebdf9b0.jpeg","article_res/cover/91a5ee21dafecb914fabeb9430d46ec1.jpeg","Would Einstein lose his job - AI and Quantum Computing: A Glimpse into the Near Future","So Einstein's job is still safe.",{"id":136,"publish_date":137,"is_original":23,"collection":138,"cover_url":139,"cover_url_1_1":140,"title":141,"summary":142,"author":28},101,"2024-12-14","#Nvidia #AI 3D Generator","article_res/cover/693e07c85980c5c0c8fde3f037733f23.jpeg","article_res/cover/9ea8edff2d5d303ff3fffff3f6f9c3d9.jpeg","NVIDIA's open-source 3D project LLaMA-Mesh","LLaMA-Mesh: Unifying 3D Mesh Generation with Language Models",{"id":144,"publish_date":145,"is_original":23,"collection":146,"cover_url":147,"cover_url_1_1":148,"title":149,"summary":150,"author":28},131,"2024-11-10","#OpenAI","article_res/cover/87f8ed353ce39f31960e7cdfaf075a35.jpeg","article_res/cover/f597a63935f5cd32e484b4aadd6019e8.jpeg","ChatGPT has launched the Search function","Get fast, timely answers with links to relevant web sources.",{"big":152,"small":214},[153,181],{"title":154,"list":155},"AGENT",[156,157,165,173],{"id":112,"publish_date":113,"is_original":23,"collection":114,"cover_url":115,"cover_url_1_1":116,"title":117,"summary":118,"author":28},{"id":158,"publish_date":159,"is_original":23,"collection":160,"cover_url":161,"cover_url_1_1":162,"title":163,"summary":164,"author":28},622,"2025-03-24","#OWL #AI Agent #MAS #MCP #CUA","article_res/cover/cb50ca7f2bf4d1ed50202d7406e1c19a.jpeg","article_res/cover/4aa7aa3badfacf3cc84121334f1050dd.jpeg","OWL: Multi-agent collaboration","OWL: Optimized Workforce Learning for General Multi-Agent Assistance in Real-World Task Automation",{"id":166,"publish_date":167,"is_original":23,"collection":168,"cover_url":169,"cover_url_1_1":170,"title":171,"summary":172,"author":28},620,"2025-03-26","#LLM #Google #Gemini #AI Agent","article_res/cover/53751a6dbbe990b1eb0b63f3b062aed4.jpeg","article_res/cover/031344981f0a212ff82d1f3a64aa5756.jpeg","Gemini 2.5 Pro, claimed to be far ahead of the competition, has been released with great fanfare: comprehensively surpassing other LLMs and topping the global rankings","Gemini 2.5: Our most intelligent AI model",{"id":174,"publish_date":175,"is_original":23,"collection":176,"cover_url":177,"cover_url_1_1":178,"title":179,"summary":180,"author":28},616,"2025-03-29","#MAS #AI Agent #AI Coder #MetaGPT #MGX","article_res/cover/9dcd702ad2035902e5e77967c34a1f1e.jpeg","article_res/cover/0a97fc4a922753c8f46ff38792020df8.jpeg","MGX - An automated website-building platform composed of multiple AI Agents","Your 24/7 AI Team | Dream, Chat, Create.",{"title":182,"list":183},"OPENAI",[184,191,199,206],{"id":185,"publish_date":167,"is_original":23,"collection":186,"cover_url":187,"cover_url_1_1":188,"title":189,"summary":190,"author":28},619,"#OpenAI #AI Image Generator #4o #MMM #AR Transformer","article_res/cover/2faffc97fcecf3151552cb0fd3206d89.jpeg","article_res/cover/1133cb4948af44cee2e7fbe79efb69e5.jpeg","The native image function of GPT-4o is officially launched","Introducing 4o Image Generation",{"id":192,"publish_date":193,"is_original":4,"collection":194,"cover_url":195,"cover_url_1_1":196,"title":197,"summary":198,"author":28},434,"2023-07-15","#Anthropic #OpenAI #Google #AI Code Generator #Claude","article_res/cover/e1b6f600a2b9f262a4392684e5f2ce25.jpeg","article_res/cover/6e1772e83f78f9a351ab23d3e414adee.jpeg","Latest Updates on Google Bard /Anthropic Claude2 / ChatGPT Code Interpreter","We want our models to use their programming skills to provide more natural interfaces to the basic functions of our computers.  \n - OpenAI",{"id":200,"publish_date":201,"is_original":4,"collection":146,"cover_url":202,"cover_url_1_1":203,"title":204,"summary":205,"author":28},417,"2023-08-24","article_res/cover/bccf897d50a88b18364e35f7466387e0.jpeg","article_res/cover/2f871085c1073717c1703ae86e18056f.jpeg","The GPT-3.5 Turbo fine-tuning (fine-tuning function) has been released～","Developers can now bring their own data to customize GPT-3.5 Turbo for their use cases.",{"id":207,"publish_date":208,"is_original":4,"collection":209,"cover_url":210,"cover_url_1_1":211,"title":212,"summary":213,"author":28},407,"2023-09-22","#OpenAI #AI Image Generator","article_res/cover/c59005e903d35cfc32346e2756e2728a.jpeg","article_res/cover/ba011d265e6d84b5c8cb6fd6b757b6cc.jpeg","Dall-E 3","DALL·E 3 understands significantly more nuance and detail, allowing you to easily translate your ideas into images.",[215,221,241],{"title":10,"list":216},[217,218,219,220],{"id":96,"publish_date":97,"is_original":23,"collection":98,"cover_url":99,"cover_url_1_1":100,"title":101,"summary":102,"author":28},{"id":104,"publish_date":105,"is_original":23,"collection":106,"cover_url":107,"cover_url_1_1":108,"title":109,"summary":110,"author":28},{"id":112,"publish_date":113,"is_original":23,"collection":114,"cover_url":115,"cover_url_1_1":116,"title":117,"summary":118,"author":28},{"id":166,"publish_date":167,"is_original":23,"collection":168,"cover_url":169,"cover_url_1_1":170,"title":171,"summary":172,"author":28},{"title":222,"list":223},"GOOGLE",[224,225,226,234],{"id":120,"publish_date":113,"is_original":23,"collection":121,"cover_url":122,"cover_url_1_1":123,"title":124,"summary":125,"author":28},{"id":166,"publish_date":167,"is_original":23,"collection":168,"cover_url":169,"cover_url_1_1":170,"title":171,"summary":172,"author":28},{"id":227,"publish_date":228,"is_original":23,"collection":229,"cover_url":230,"cover_url_1_1":231,"title":232,"summary":233,"author":28},615,"2025-03-30","#AI Researcher #AI Science #HKU #Google #AI Agent","article_res/cover/21fadf906067714bb0db31ae13a77c15.jpeg","article_res/cover/2697999a72bd26b22e85f0e92936d3ed.jpeg","AI-Researcher: LLM-driven全自动 scientific research assistant","AI-Researcher: Fully-Automated Scientific Discovery with LLM Agents  \nOpen-Sourced Alternative to Google AI Co-Scientist",{"id":235,"publish_date":236,"is_original":23,"collection":73,"cover_url":237,"cover_url_1_1":238,"title":239,"summary":240,"author":28},463,"2023-05-09","article_res/cover/89800f207723acdb55fc53bf999ebdc9.jpeg","article_res/cover/5764f369b4accd8f83e94aa4c077a175.jpeg","The Smallville sandbox world - A town with 25 virtual residents","Believable proxies of human behavior can empower interactive apps: Immersive environment, Rehearsal space, Prototyping tool",{"title":242,"list":243},"NVIDIA",[],true,{"code":4,"msg":5,"data":246},{"id":247,"publish_date":248,"is_original":23,"collection":249,"articles_id":250,"cover_url":251,"cover_url_1_1":252,"title":253,"summary":254,"author":28,"content":255,"popular":256,"list":317,"category":373,"tag":374},25,"2025-02-27","#Alibaba #AI Video Editor #Animate Anyone #AI Video Generation","-VJKdW_FMvoyWVn3BGSiQQ","article_res/cover/450c28bb351bd7dd1aa9ab908b494204.jpeg","article_res/cover/6bfa652b1433ba6033e45ac848042745.jpeg","Animate Anyone 2: High-Fidelity Human Image Animation and Environmental Interaction","High-Fidelity Character Image Animation with Environment Affordance","\u003Cdiv class=\"rich_media_content js_underline_content\n                       autoTypeSetting24psection\n            \" id=\"js_content\">\u003Cp style='box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: normal;text-align: left;text-indent: 0px;padding: 8px 0px;font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;font-weight: 400;orphans: 2;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;'>\u003Cspan leaf=\"\">). However, these methods fail to reasonably associate characters with their environment. To address this issue, Alibaba launched Animate Anyone 2, which aims to generate character animations through environmental interaction.\u003C/span>\u003C/p>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770056470.5615192046964674.mp4\" poster=\"./assets/17423770056460.2259626955742573.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770056410.22300812982550133.mp4\" poster=\"./assets/17423770056450.7830938664301876.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003Cp style='box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: normal;text-align: left;text-indent: 0px;padding: 8px 0px;font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;font-weight: 400;orphans: 2;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;'>\u003Cspan leaf=\"\">In addition to extracting motion signals from the source video, it also captures the performance of the environment and uses it as conditional input. The environment is defined as the area excluding the character, and the model of Animate Anyone 2 generates the character while maintaining consistency with the environment. The team proposed a shape-agnostic masking strategy that more effectively describes the relationship between the character and the environment. Additionally, to enhance the fidelity of object interactions, the team used an object guider to extract features of interacting objects and injected them into the denoising process through spatial mixing. Alibaba also introduced a pose modulation strategy, enabling the model to handle more diverse motion patterns. Experimental results demonstrate the superiority of this method.\u003C/span>\u003C/p>\u003Ch4 style='box-sizing: border-box;margin: 30px 0px 15px;color: rgba(0, 0, 0, 0.85);font-weight: 500;cursor: pointer;padding: 0px;display: block;font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-size: 16px;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;letter-spacing: normal;orphans: 2;text-align: left;text-indent: 0px;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;'>\u003Cspan style=\"box-sizing: border-box;cursor: pointer;font-size: 18px;color: rgb(0, 0, 0);line-height: 1.5em;letter-spacing: 0em;text-align: left;font-weight: bold;display: block;\">\u003Cspan leaf=\"\">motion\u003C/span>\u003C/span>\u003C/h4>\u003Cp style='box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: normal;text-align: left;text-indent: 0px;padding: 8px 0px;font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;font-weight: 400;orphans: 2;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;'>\u003Cspan leaf=\"\">Animate Anyone 2 differs from previous methods that only use motion signals to generate character animations. This method further extracts environmental performance from the driving video, allowing the character animation to exhibit interactivity with the environment.\u003C/span>\u003C/p>\u003Ch4 style='box-sizing: border-box;margin: 30px 0px 15px;color: rgba(0, 0, 0, 0.85);font-weight: 500;cursor: pointer;padding: 0px;display: block;font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-size: 16px;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;letter-spacing: normal;orphans: 2;text-align: left;text-indent: 0px;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;'>\u003Cspan style=\"box-sizing: border-box;cursor: pointer;font-size: 18px;color: rgb(0, 0, 0);line-height: 1.5em;letter-spacing: 0em;text-align: left;font-weight: bold;display: block;\">\u003Cspan leaf=\"\">method\u003C/span>\u003C/span>\u003C/h4>\u003Cp style='box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: normal;text-align: left;text-indent: 0px;padding: 8px 0px;font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;font-weight: 400;orphans: 2;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;'>\u003Cspan leaf=\"\">The figure above shows the framework of Animate Anyone 2. Environmental information is captured from the source video, where the environment is defined as the region excluding the character and is used as input to the model, enabling end-to-end learning of character-environment fusion. To maintain object interaction, the team also injected features of objects interacting with the character. These object features are extracted by a lightweight object guider and injected into the denoising process through spatial mixing. To handle more diverse motions, the team proposed a pose modulation method that better represents the spatial relationships between body limbs.\u003C/span>\u003C/p>\u003Ch4 style='box-sizing: border-box;margin: 30px 0px 15px;color: rgba(0, 0, 0, 0.85);font-weight: 500;cursor: pointer;padding: 0px;display: block;font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-size: 16px;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;letter-spacing: normal;orphans: 2;text-align: left;text-indent: 0px;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;'>\u003Cspan style=\"box-sizing: border-box;cursor: pointer;font-size: 18px;color: rgb(0, 0, 0);line-height: 1.5em;letter-spacing: 0em;text-align: left;font-weight: bold;display: block;\">\u003Cspan leaf=\"\">results\u003C/span>\u003C/span>\u003C/h4>\u003Cul style='box-sizing: border-box;margin: 8px 0px;cursor: pointer;list-style-type: disc;padding: 0px 0px 0px 25px;color: rgb(0, 0, 0);font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-size: 16px;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;font-weight: 400;letter-spacing: normal;orphans: 2;text-align: left;text-indent: 0px;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;' class=\"list-paddingleft-1\">\u003Cli style=\"box-sizing: border-box;cursor: pointer;\">\u003Csection style=\"box-sizing: border-box;cursor: pointer;margin-top: 5px;margin-bottom: 5px;color: rgb(1, 1, 1);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;font-weight: normal;\">\u003Cp style=\"box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;text-indent: 0em;padding: 8px 0px;\">\u003Cstrong style=\"box-sizing: border-box;font-weight: bold;cursor: pointer;color: rgb(0, 0, 0);background: none 0% 0% / auto no-repeat scroll padding-box border-box rgba(0, 0, 0, 0);width: auto;height: auto;margin: 0px;padding: 0px;border-style: none;border-width: 3px;border-color: rgba(0, 0, 0, 0.4);border-radius: 0px;\">\u003Cspan leaf=\"\">environmental interaction\u003C/span>\u003C/strong>\u003Cspan leaf=\"\">\u003Cbr>\u003C/span>\u003Cspan leaf=\"\">Animate Anyone 2 has demonstrated significant capabilities in generating characters with context-consistent environmental interactions, showcasing seamless integration of characters with scenes and strong character-object interactions.\u003C/span>\u003C/p>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770095940.5197134573886819.mp4\" poster=\"./assets/17423770056370.9000341119669546.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003C/section>\u003C/li>\u003Cli style=\"box-sizing: border-box;cursor: pointer;\">\u003Csection style=\"box-sizing: border-box;cursor: pointer;margin-top: 5px;margin-bottom: 5px;color: rgb(1, 1, 1);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;font-weight: normal;\">\u003Cp style=\"box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;text-indent: 0em;padding: 8px 0px;\">\u003Cstrong style=\"box-sizing: border-box;font-weight: bold;cursor: pointer;color: rgb(0, 0, 0);background: none 0% 0% / auto no-repeat scroll padding-box border-box rgba(0, 0, 0, 0);width: auto;height: auto;margin: 0px;padding: 0px;border-style: none;border-width: 3px;border-color: rgba(0, 0, 0, 0.4);border-radius: 0px;\">\u003Cspan leaf=\"\">dynamic motion\u003C/span>\u003C/strong>\u003Cspan leaf=\"\">\u003Cbr>\u003C/span>\u003Cspan leaf=\"\">Animate Anyone 2 has shown strong capabilities in handling complex motions while ensuring character consistency and maintaining reasonable interactions with the environment.\u003C/span>\u003C/p>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770087770.7033714982597514.mp4\" poster=\"./assets/17423770056450.5329212355759709.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003C/section>\u003C/li>\u003Cli style=\"box-sizing: border-box;cursor: pointer;\">\u003Csection style=\"box-sizing: border-box;cursor: pointer;margin-top: 5px;margin-bottom: 5px;color: rgb(1, 1, 1);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;font-weight: normal;\">\u003Cp style=\"box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;text-indent: 0em;padding: 8px 0px;\">\u003Cstrong style=\"box-sizing: border-box;font-weight: bold;cursor: pointer;color: rgb(0, 0, 0);background: none 0% 0% / auto no-repeat scroll padding-box border-box rgba(0, 0, 0, 0);width: auto;height: auto;margin: 0px;padding: 0px;border-style: none;border-width: 3px;border-color: rgba(0, 0, 0, 0.4);border-radius: 0px;\">\u003Cspan leaf=\"\">character interaction\u003C/span>\u003C/strong>\u003Cspan leaf=\"\">\u003Cbr>\u003C/span>\u003Cspan leaf=\"\">Animate Anyone 2 can generate interactions between characters, ensuring the rationality of their movements and maintaining consistency with the surrounding environment.\u003C/span>\u003C/p>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770085880.6254112810074182.mp4\" poster=\"./assets/17423770056490.5191357262590828.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003C/section>\u003C/li>\u003C/ul>\u003Ch4 style='box-sizing: border-box;margin: 30px 0px 15px;color: rgba(0, 0, 0, 0.85);font-weight: 500;cursor: pointer;padding: 0px;display: block;font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-size: 16px;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;letter-spacing: normal;orphans: 2;text-align: left;text-indent: 0px;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;'>\u003Cspan style=\"box-sizing: border-box;cursor: pointer;font-size: 18px;color: rgb(0, 0, 0);line-height: 1.5em;letter-spacing: 0em;text-align: left;font-weight: bold;display: block;\">\u003Cspan leaf=\"\">comparison\u003C/span>\u003C/span>\u003C/h4>\u003Cul style='box-sizing: border-box;margin: 8px 0px;cursor: pointer;list-style-type: disc;padding: 0px 0px 0px 25px;color: rgb(0, 0, 0);font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-size: 16px;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;font-weight: 400;letter-spacing: normal;orphans: 2;text-align: left;text-indent: 0px;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;' class=\"list-paddingleft-1\">\u003Cli style=\"box-sizing: border-box;cursor: pointer;\">\u003Csection style=\"box-sizing: border-box;cursor: pointer;margin-top: 5px;margin-bottom: 5px;color: rgb(1, 1, 1);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;font-weight: normal;\">\u003Cp style=\"box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;text-indent: 0em;padding: 8px 0px;\">\u003Cstrong style=\"box-sizing: border-box;font-weight: bold;cursor: pointer;color: rgb(0, 0, 0);background: none 0% 0% / auto no-repeat scroll padding-box border-box rgba(0, 0, 0, 0);width: auto;height: auto;margin: 0px;padding: 0px;border-style: none;border-width: 3px;border-color: rgba(0, 0, 0, 0.4);border-radius: 0px;\">\u003Cspan leaf=\"\">Comparison with Viggle\u003C/span>\u003C/strong>\u003Cspan leaf=\"\">\u003Cbr>\u003C/span>\u003Cspan leaf=\"\">Viggle can replace characters in videos with provided character images, similar to the application scenarios of the Animate Anyone 2 method. The results were compared with the latest Viggle V3. Viggle's output shows rough character-environment fusion, lacks natural motion, and fails to capture character-environment interactions. In contrast, the results of the Animate Anyone 2 method demonstrate higher fidelity.\u003C/span>\u003C/p>\u003C/section>\u003C/li>\u003C/ul>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770095730.2752826597756348.mp4\" poster=\"./assets/17423770058530.7116566128500919.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770057620.4135656344500722.mp4\" poster=\"./assets/17423770079420.38219966568685937.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003Cul style='box-sizing: border-box;margin: 8px 0px;cursor: pointer;list-style-type: disc;padding: 0px 0px 0px 25px;color: rgb(0, 0, 0);font-family: Optima, \"Microsoft YaHei\", PingFangSC-regular, serif;font-size: 16px;font-style: normal;font-variant-ligatures: normal;font-variant-caps: normal;font-weight: 400;letter-spacing: normal;orphans: 2;text-align: left;text-indent: 0px;text-transform: none;widows: 2;word-spacing: 0px;-webkit-text-stroke-width: 0px;white-space: normal;background-color: rgb(255, 255, 255);text-decoration-thickness: initial;text-decoration-style: initial;text-decoration-color: initial;' class=\"list-paddingleft-1\">\u003Cli style=\"box-sizing: border-box;cursor: pointer;\">\u003Csection style=\"box-sizing: border-box;cursor: pointer;margin-top: 5px;margin-bottom: 5px;color: rgb(1, 1, 1);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;font-weight: normal;\">\u003Cp style=\"box-sizing: border-box;margin: 0px;cursor: pointer;color: rgb(0, 0, 0);font-size: 16px;line-height: 1.8em;letter-spacing: 0em;text-align: left;text-indent: 0em;padding: 8px 0px;\">\u003Cstrong style=\"box-sizing: border-box;font-weight: bold;cursor: pointer;color: rgb(0, 0, 0);background: none 0% 0% / auto no-repeat scroll padding-box border-box rgba(0, 0, 0, 0);width: auto;height: auto;margin: 0px;padding: 0px;border-style: none;border-width: 3px;border-color: rgba(0, 0, 0, 0.4);border-radius: 0px;\">\u003Cspan leaf=\"\">Comparison with MIMO\u003C/span>\u003C/strong>\u003Cspan leaf=\"\">\u003Cbr>\u003C/span>\u003Cspan leaf=\"\">MIMO is the most relevant method to the task setting of Animate Anyone 2, which deeply decomposes characters, backgrounds, and occlusions in videos and recombines these elements to generate character videos. Animate Anyone 2 outperforms MIMO in robustness and detail preservation.\u003C/span>\u003C/p>\u003C/section>\u003C/li>\u003C/ul>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770083120.8239977105439913.mp4\" poster=\"./assets/17423770056470.06399567705618603.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003Csection nodeleaf=\"\">\u003Cdiv style=\"height: 508px; background: rgb(0, 0, 0); border-radius: 4px; overflow: hidden; margin-bottom: 12px;\">\u003Cvideo src=\"./assets/17423770061510.2178532966447284.mp4\" poster=\"./assets/17423770068930.47765624321975797.jpeg\" controls=\"\" style=\"width: 100%;height: 100%;\">\u003C/video>\u003C/div>\u003C/section>\u003Cp style=\"display: none;\">\u003Cmp-style-type data-value=\"3\">\u003C/mp-style-type>\u003C/p>\u003C/div>",[257,265,274,282,290,298,306,314],{"id":258,"title_md5":259,"publish_date":260,"author_md5":261,"is_original":4,"collection":5,"summary_md5":262,"cover_url":263,"cover_url_1_1":264},576,"a4cd758782bbfb215e196c95667364de","2022-04-10","4e8f56a623a86281345c02db3a8b2b72","fa5b6296a9c6316e0fe26972db3ccc5f","article_res/cover/ab33f7df918c9b6b0c02f57f839c1726.jpeg","article_res/cover/8ac9ee6a467d13ac8504aa7f9631a302.jpeg",{"id":266,"title_md5":267,"publish_date":268,"author_md5":269,"is_original":23,"collection":270,"summary_md5":271,"cover_url":272,"cover_url_1_1":273},84,"c1c7ee59ace8f8a90289685e4d5ee0a1","2024-12-31","bc27fa490c4d0d525bac812fc0793534","#Robotics #Embodied AI #Nvidia","a792c2c2621d23d2dbd779ecbaaaa612","article_res/cover/9674435f69d7a9096f49e354619b1b4b.jpeg","article_res/cover/792b21b2cfc9af32151ee3d0c4a85864.jpeg",{"id":275,"title_md5":276,"publish_date":277,"author_md5":269,"is_original":23,"collection":278,"summary_md5":279,"cover_url":280,"cover_url_1_1":281},192,"53d80eb16d135f8f71ac98ae64c2f535","2024-08-19","#History","acf9997cac8e4f8f15263362e9da4940","article_res/cover/4df67464bfe1776bc5eff7f08b21354f.jpeg","article_res/cover/507a86384d673bba5fd0a00d305d2421.jpeg",{"id":283,"title_md5":284,"publish_date":285,"author_md5":286,"is_original":4,"collection":57,"summary_md5":287,"cover_url":288,"cover_url_1_1":289},590,"d3059154cea74c59deed6760c43203f6","2022-03-27","311a46cfdaa3afda544e9285644f70d7","1d7da25b0c18853ca2e8c600488c25c4","article_res/cover/88c140b4686d2d31c7bb41221c36acda.jpeg","article_res/cover/7340352d81978ec9a8f42190e3ee22f6.jpeg",{"id":291,"title_md5":292,"publish_date":293,"author_md5":294,"is_original":4,"collection":5,"summary_md5":295,"cover_url":296,"cover_url_1_1":297},592,"e88cffc6b542cdbfe19e2e5c77d69deb","2022-03-25","8b3607d0f4181a3cb6ffdccf7185f09b","40bae0f2b1ea67933a0d3cf04c3d2725","article_res/cover/4034319b0cd2a3db4eb33ac1f1e3d4cd.jpeg","article_res/cover/e97b8ef59089ce9ee82d2aec858f3335.jpeg",{"id":299,"title_md5":300,"publish_date":301,"author_md5":269,"is_original":23,"collection":302,"summary_md5":303,"cover_url":304,"cover_url_1_1":305},79,"efe7c1d8f6c4836919fd5701eaadce26","2025-01-05","#Cursor #AI Code","1154123b304ac595c72146c10a7263ba","article_res/cover/9dc4aa46f8db0417409b1033f741c9aa.jpeg","article_res/cover/89f93313a7130276eaaad46aedf4b6e2.jpeg",{"id":307,"title_md5":308,"publish_date":309,"author_md5":269,"is_original":23,"collection":310,"summary_md5":311,"cover_url":312,"cover_url_1_1":313},284,"df06faf78334ae24ae423ef6625b6109","2024-05-04","#AI Index Report 2024","a53842d54fcd985f443a4a695022cb73","article_res/cover/cba905132168c996108de5af21120030.jpeg","article_res/cover/056d14bc0f7a1f12715fdd4ef9f261dc.jpeg",{"id":227,"title_md5":315,"publish_date":228,"author_md5":269,"is_original":23,"collection":229,"summary_md5":316,"cover_url":230,"cover_url_1_1":231},"42ed0851064cd3a67744f14d28dd8c15","70c266e151a94d81346a91d4ec942bdf",{"related":318,"small":358},[319,327,335,343,351],{"id":320,"publish_date":321,"is_original":23,"collection":322,"cover_url":323,"cover_url_1_1":324,"title":325,"summary":326,"author":28},208,"2024-07-31","#Meta #Object Detection","article_res/cover/fd1b90586fee909b10777e47d561f409.jpeg","article_res/cover/d30b0789f086ae58ad87acd922eb7a19.jpeg","Meta's SAM 2 - A unified model for real-time object segmentation","Introducing SAM 2: The next generation of Meta Segment Anything Model for videos and images",{"id":328,"publish_date":329,"is_original":23,"collection":330,"cover_url":331,"cover_url_1_1":332,"title":333,"summary":334,"author":28},283,"2024-05-05","#AI Index Report 2024 #LLM #Alignment","article_res/cover/6182044281bbccd7ee6db9e3e4c8783c.jpeg","article_res/cover/baeb687f1c5504562e618281de151def.jpeg","\"The 2024 Artificial Intelligence Index Report\" - 2.11 Characteristics of LLM","LLMs exhibit emergent abilities, meaning they can unpredictably and suddenly display new capabilities at larger scales.",{"id":336,"publish_date":337,"is_original":23,"collection":338,"cover_url":339,"cover_url_1_1":340,"title":341,"summary":342,"author":28},305,"2024-03-30","#AI Audio Generator #AI Avatar #Tencent","article_res/cover/238d4919401a20eefc9fb59e438ccac9.jpeg","article_res/cover/b15d2c460f8f4b558cbbcc9d5c5c37c9.jpeg","AniPortrait - Audio-driven Realistic Portrait Animation Synthesis Technology","A novel framework for generating high-quality animation driven by audio and a reference portrait image.",{"id":344,"publish_date":345,"is_original":4,"collection":346,"cover_url":347,"cover_url_1_1":348,"title":349,"summary":350,"author":53},424,"2023-08-05","#Stable Diffusion #AI Image Generator #Tencent #ControlNet","article_res/cover/df9e0e269868add9a42b2a15ea30c74b.jpeg","article_res/cover/b064c9157c194fb1ba6d8631e9f56364.jpeg","Controlnet and T2I-Adapter","T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models",{"id":352,"publish_date":353,"is_original":4,"collection":5,"cover_url":354,"cover_url_1_1":355,"title":356,"summary":357,"author":28},450,"2023-05-27","article_res/cover/353b3009cab7cb28443e12e99df12f25.jpeg","article_res/cover/d6bd6db75b548e717bbd7e9b374ebf97.jpeg","Chat.ALL Usage Notes","Concurrently chat with ChatGPT, Bing, Bard, Alpaca, Vincuna, Claude, ChatGLM, MOSS, iFlytek, ERNIE\nDiscover the best answers",[359,365,371],{"title":10,"list":360},[361,362,363,364],{"id":96,"publish_date":97,"is_original":23,"collection":98,"cover_url":99,"cover_url_1_1":100,"title":101,"summary":102,"author":28},{"id":104,"publish_date":105,"is_original":23,"collection":106,"cover_url":107,"cover_url_1_1":108,"title":109,"summary":110,"author":28},{"id":112,"publish_date":113,"is_original":23,"collection":114,"cover_url":115,"cover_url_1_1":116,"title":117,"summary":118,"author":28},{"id":166,"publish_date":167,"is_original":23,"collection":168,"cover_url":169,"cover_url_1_1":170,"title":171,"summary":172,"author":28},{"title":222,"list":366},[367,368,369,370],{"id":120,"publish_date":113,"is_original":23,"collection":121,"cover_url":122,"cover_url_1_1":123,"title":124,"summary":125,"author":28},{"id":166,"publish_date":167,"is_original":23,"collection":168,"cover_url":169,"cover_url_1_1":170,"title":171,"summary":172,"author":28},{"id":227,"publish_date":228,"is_original":23,"collection":229,"cover_url":230,"cover_url_1_1":231,"title":232,"summary":233,"author":28},{"id":235,"publish_date":236,"is_original":23,"collection":73,"cover_url":237,"cover_url_1_1":238,"title":239,"summary":240,"author":28},{"title":242,"list":372},[],[8,9,10],[8,12,13,14,9,10,15,16,17,18],["Reactive",245],1754646413963]