Home » Sample Page

Sample Page Title


class MetaAgent:
    """Meta-agent that designs, runs, evaluates, and self-improves task-specific agents.

    Lifecycle (driven by :meth:`build_and_run`):
    ``design`` -> ``instantiate`` -> run -> ``evaluate`` -> ``refine`` (repeated).

    NOTE(review): `LocalLLM`, `ToolSpec`, `MemorySpec`, `PlannerSpec`, `AgentConfig`,
    `AgentRuntime`, `TfidfRetrievalMemory`, `ScratchpadMemory`, and
    `default_tool_registry` are defined elsewhere in this project; field names
    (`name`, `type`, `memory`, `tools`) are restored from the de-mangled schema
    keys — confirm against their definitions.
    """

    def __init__(self, llm: Optional[LocalLLM] = None):
        # Use the injected LLM, or fall back to a default local model.
        self.llm = llm or LocalLLM()

    def _capability_heuristics(self, task: str) -> Dict[str, Any]:
        """Infer coarse capability flags from keywords in the task description.

        Returns a dict of five booleans: needs_data / needs_math / needs_writing /
        needs_analysis / needs_memory.
        """
        t = task.lower()

        needs_data = any(k in t for k in ["csv", "dataframe", "pandas", "dataset", "table", "excel"])
        needs_math = any(k in t for k in ["calculate", "compute", "probability", "equation", "optimize", "derivative", "integral"])
        needs_writing = any(k in t for k in ["write", "draft", "email", "cover letter", "proposal", "summarize", "rewrite"])
        needs_analysis = any(k in t for k in ["analyze", "insights", "trend", "compare", "benchmark"])
        needs_memory = any(k in t for k in ["long", "multi-step", "remember", "plan", "workflow", "pipeline"])

        return {
            "needs_data": needs_data,
            "needs_math": needs_math,
            "needs_writing": needs_writing,
            "needs_analysis": needs_analysis,
            "needs_memory": needs_memory,
        }

    def design(self, task_description: str) -> AgentConfig:
        """Build an :class:`AgentConfig` (tools, memory, planner) tailored to the task.

        Raises:
            RuntimeError: if a selected tool is missing from the tool registry.
        """
        caps = self._capability_heuristics(task_description)
        tools = default_tool_registry()

        # Every agent gets the calculator and text-stats tools; CSV profiling
        # is added only when the task looks data-oriented.
        selected: List[ToolSpec] = []
        selected.append(ToolSpec(
            name="calc",
            description="Evaluate a safe mathematical expression (no arbitrary code).",
            inputs_schema={"type": "object", "properties": {"expression": {"type": "string"}}, "required": ["expression"]},
        ))
        selected.append(ToolSpec(
            name="text_stats",
            description="Compute basic statistics about a text blob (words, lines, unique words).",
            inputs_schema={"type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"]},
        ))
        if caps["needs_data"]:
            selected.append(ToolSpec(
                name="csv_profile",
                description="Load a CSV from a local path and print a quick profile (head, describe).",
                inputs_schema={"type": "object", "properties": {"path": {"type": "string"}, "n_rows": {"type": "integer"}}, "required": ["path"]},
            ))

        # Retrieval-backed memory for data/analysis/long-horizon tasks;
        # a lightweight scratchpad otherwise.
        if caps["needs_memory"] or caps["needs_analysis"] or caps["needs_data"]:
            mem = MemorySpec(type="retrieval_tfidf", max_items=250, retrieval_k=6)
        else:
            mem = MemorySpec(type="scratchpad", max_items=120, retrieval_k=5)

        # Harder tasks get a larger ReAct step budget; temperature stays low.
        if caps["needs_analysis"] or caps["needs_data"] or caps["needs_memory"]:
            planner = PlannerSpec(type="react", max_steps=12, temperature=0.2)
        else:
            planner = PlannerSpec(type="react", max_steps=8, temperature=0.2)

        goal = "Solve the user task with tool use when helpful; produce a clean final answer."
        cfg = AgentConfig(
            agent_name="AutoDesignedAgent",
            goal=goal,
            planner=planner,
            memory=mem,
            tools=selected,
            output_style="concise",
        )

        # Sanity check: every tool we selected must exist in the registry.
        for ts in selected:
            if not tools.has(ts.name):
                raise RuntimeError(f"Tool selected but not registered: {ts.name}")

        return cfg

    def instantiate(self, cfg: AgentConfig) -> AgentRuntime:
        """Construct a runnable :class:`AgentRuntime` from a config."""
        tools = default_tool_registry()
        if cfg.memory.type == "retrieval_tfidf":
            mem = TfidfRetrievalMemory(max_items=cfg.memory.max_items, retrieval_k=cfg.memory.retrieval_k)
        else:
            mem = ScratchpadMemory(max_items=cfg.memory.max_items)
        return AgentRuntime(config=cfg, llm=self.llm, tools=tools, memory=mem)

    def evaluate(self, task: str, answer: str) -> Dict[str, Any]:
        """Heuristically score an answer in [0, 1] and report failure flags.

        Returns:
            ``{"score": float, "flags": {"empty", "generic", "mentions_max_steps"}}``.
        """
        a = (answer or "").strip().lower()
        flags = {
            "empty": len(a) == 0,
            "generic": any(p in a for p in ["i can't", "cannot", "missing", "provide more details", "parser fallback"]),
            "mentions_max_steps": "max steps" in a,
        }
        # Start from a perfect score and subtract a penalty per failure mode.
        score = 1.0
        if flags["empty"]:
            score -= 0.6
        if flags["generic"]:
            score -= 0.25
        if flags["mentions_max_steps"]:
            score -= 0.2
        score = max(0.0, min(1.0, score))
        # Key must be "score": build_and_run reads report["score"].
        return {"score": score, "flags": flags}

    def refine(self, cfg: AgentConfig, eval_report: Dict[str, Any], task: str) -> AgentConfig:
        """Return an adjusted deep copy of *cfg* based on the evaluation report."""
        new_cfg = cfg.model_copy(deep=True)

        # Weak answers: widen the planner's step budget (capped at 18), nudge
        # temperature up (capped at 0.35), and upgrade to retrieval memory.
        if eval_report["flags"]["generic"] or eval_report["flags"]["mentions_max_steps"]:
            new_cfg.planner.max_steps = min(18, new_cfg.planner.max_steps + 6)
            new_cfg.planner.temperature = min(0.35, new_cfg.planner.temperature + 0.05)
            if new_cfg.memory.type != "retrieval_tfidf":
                new_cfg.memory.type = "retrieval_tfidf"
                new_cfg.memory.max_items = max(new_cfg.memory.max_items, 200)
                new_cfg.memory.retrieval_k = max(new_cfg.memory.retrieval_k, 6)

        # Ensure data-flavored tasks always carry the CSV profiling tool.
        t = task.lower()
        if any(k in t for k in ["csv", "dataframe", "pandas", "dataset", "table"]):
            if not any(ts.name == "csv_profile" for ts in new_cfg.tools):
                new_cfg.tools.append(ToolSpec(
                    name="csv_profile",
                    description="Load a CSV from a local path and print a quick profile (head, describe).",
                    inputs_schema={"type": "object", "properties": {"path": {"type": "string"}, "n_rows": {"type": "integer"}}, "required": ["path"]},
                ))

        return new_cfg

    def build_and_run(self, task: str, improve_rounds: int = 1, verbose: bool = True) -> Tuple[str, AgentConfig]:
        """Design an agent for *task*, run it, and self-improve up to *improve_rounds* times.

        Returns:
            ``(final_answer, final_config)``.
        """
        cfg = self.design(task)
        agent = self.instantiate(cfg)

        if verbose:
            print("\n==============================")
            print("META-AGENT: DESIGNED CONFIG")
            print("==============================")
            print(cfg.model_dump_json(indent=2))

        ans = agent.run(task, verbose=verbose)
        report = self.evaluate(task, ans)

        if verbose:
            print("\n==============================")
            print("EVALUATION REPORT")
            print("==============================")
            print(json.dumps(report, indent=2))
            print("\n==============================")
            print("FINAL ANSWER")
            print("==============================")
            print(ans)

        for r in range(improve_rounds):
            if report["score"] >= 0.85:
                break  # answer already good enough — stop refining
            cfg = self.refine(cfg, report, task)
            agent = self.instantiate(cfg)
            if verbose:
                print("\n\n==============================")
                print(f"SELF-IMPROVEMENT ROUND {r + 1}: UPDATED CONFIG")
                print("==============================")
                print(cfg.model_dump_json(indent=2))
            ans = agent.run(task, verbose=verbose)
            report = self.evaluate(task, ans)
            if verbose:
                print("\nEVAL:", json.dumps(report, indent=2))
                print("\nANSWER:\n", ans)

        return ans, cfg


# Demo driver: build one MetaAgent and exercise it on a sample task.
meta = MetaAgent()


# Example tasks covering summarization/writing, data profiling, and math.
examples = [
    "Design an agent workflow to summarize a long meeting transcript and extract action items. Keep it concise.",
    "I have a local CSV at /content/sample.csv. Profile it and tell me the top 3 insights.",
    "Compute the monthly payment for a $12,000 loan at 8% APR over 36 months. Show the formula briefly.",
]


print("\n==============================")
print("RUNNING A QUICK DEMO TASK")
print("==============================")
demo_task = examples[2]  # the loan-payment math task
_ = meta.build_and_run(demo_task, improve_rounds=1, verbose=True)

Related Articles

LEAVE A REPLY

Please enter your comment!
Please enter your name here

Latest Articles