

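This section continues from the tutorial's earlier setup and MemoryEngine cells, so it assumes re, numpy (np), pandas (pd), typing's Dict/Any, the OPENAI_MODEL and USE_OPENAI globals, and an OpenAI client already exist. If you run it standalone, a minimal stand-in might look like the sketch below (the model name is a placeholder, not the tutorial's actual choice):

# Minimal stand-in for the earlier setup cell (an assumption, not the tutorial's exact code).
import os
import re
from typing import Any, Dict

import numpy as np
import pandas as pd

OPENAI_MODEL = "gpt-4o-mini"                    # placeholder model name
USE_OPENAI = bool(os.getenv("OPENAI_API_KEY"))  # offline fallback kicks in without a key
if USE_OPENAI:
    from openai import OpenAI
    client = OpenAI()  # reads OPENAI_API_KEY from the environment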
def openai_chat(system: str, user: str) -> str:
    # `client` and OPENAI_MODEL come from the setup cell (see the stand-in above).
    resp = client.chat.completions.create(
        model=OPENAI_MODEL,
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": user},
        ],
        temperature=0.3,
    )
    return resp.choices[0].message.content


def heuristic_responder(context: str, question: str) -> str:
    # Offline fallback: parse the memory context directly instead of calling an LLM.
    lessons = re.findall(r"Lessons=(.*)", context)
    avoid = re.findall(r"Avoid=(.*)", context)
    ltm_lines = [ln for ln in context.splitlines() if ln.startswith("[LTM:")]

    steps = []
    if lessons:
        for chunk in lessons[:2]:
            for s in [x.strip() for x in chunk.split(";") if x.strip()]:
                steps.append(s)
    for ln in ltm_lines:
        if "[ltm:procedure]" in ln.lower():
            proc = re.sub(r"^\[LTM:procedure\]\s*", "", ln, flags=re.I)
            proc = proc.split("(salience=")[0].strip()
            for part in [p.strip() for p in proc.split("|") if p.strip()]:
                steps.append(part)

    steps = steps[:8] if steps else [
        "Clarify the target outcome and constraints.",
        "Use semantic recall + episodic lessons to propose a plan.",
        "Execute, then store lessons learned.",
    ]

    pitfalls = []
    if avoid:
        for chunk in avoid[:2]:
            for s in [x.strip() for x in chunk.split(";") if x.strip()]:
                pitfalls.append(s)
    pitfalls = pitfalls[:6]

    prefs = [ln for ln in ltm_lines if "[ltm:preference]" in ln.lower()]
    facts = [ln for ln in ltm_lines if "[ltm:fact]" in ln.lower() or "[ltm:constraint]" in ln.lower()]

    out = []
    out.append("Answer (memory-informed, offline fallback)\n")
    if prefs:
        out.append("Relevant preferences/constraints remembered:")
        for ln in (prefs + facts)[:6]:
            out.append(" - " + ln.split("] ", 1)[1].split(" (salience=")[0].strip())
        out.append("")
    out.append("Recommended approach:")
    for i, s in enumerate(steps, 1):
        out.append(f" {i}. {s}")
    if pitfalls:
        out.append("\nPitfalls to avoid (from episodic traces):")
        for p in pitfalls:
            out.append(" - " + p)
    out.append("\n(If you add an API key, the same memory context will feed a stronger LLM for higher-quality responses.)")
    return "\n".join(out).strip()
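For intuition, the fallback expects a context string shaped roughly like the sketch below. This format is inferred from the regexes above; the real string comes from mem.build_context and may differ in detail:

# Illustrative context only; the actual layout is produced by mem.build_context.
demo_context = "\n".join([
    "Lessons=Pin hard constraints; Add a pip install cell",
    "Avoid=Assuming an API key exists",
    "[LTM:preference] Prefer concise, structured answers (salience=0.9)",
    "[LTM:procedure] Embed items | Store with salience policy | Retrieve hybrid (salience=0.8)",
])
print(heuristic_responder(demo_context, "How should I plan a new task?"))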


class MemoryAugmentedAgent:
    def __init__(self, mem: MemoryEngine):
        self.mem = mem

    def answer(self, question: str) -> Dict[str, Any]:
        # Retrieve memory, build a context block, then answer via LLM or offline fallback.
        pack = self.mem.retrieve(question)
        context = self.mem.build_context(question, pack)

        system = (
            "You are a memory-augmented agent. Use the provided memory context.\n"
            "Prioritize:\n"
            "1) Episodic lessons (what worked before)\n"
            "2) Long-term facts/preferences/procedures\n"
            "3) Short-term conversation state\n"
            "Be concrete and stepwise. If memory conflicts, state the uncertainty."
        )

        if USE_OPENAI:
            answer = openai_chat(system=system, user=context + "\n\nUser question:\n" + question)
        else:
            answer = heuristic_responder(context=context, question=question)

        # Log both turns into short-term memory.
        self.mem.st_add("user", question, kind="message")
        self.mem.st_add("assistant", answer, kind="message")

        return {"answer": answer, "pack": pack, "context": context}


mem = MemoryEngine()
agent = MemoryAugmentedAgent(mem)


mem.ltm_add(kind="preference", text="Prefer concise, structured answers with steps and bullet points when helpful.", tags=["style"], pinned=True)
mem.ltm_add(kind="preference", text="Prefer solutions that run on Google Colab without extra setup.", tags=["environment"], pinned=True)
mem.ltm_add(kind="procedure", text="When building agent memory: embed items, store with a salience/novelty policy, retrieve with hybrid semantic+episodic ranking, and decay overused items to avoid repetition.", tags=["agent-memory"])
mem.ltm_add(kind="constraint", text="If no API key is available, provide a runnable offline fallback instead of failing.", tags=["robustness"], pinned=True)


mem.episode_add(
    task="Build an agent memory layer for troubleshooting Python errors in Colab",
    constraints={"offline_ok": True, "single_notebook": True},
    plan=[
        "Capture short-term chat context",
        "Store durable constraints/preferences in long-term vector memory",
        "After solving, extract lessons into episodic traces",
        "On new tasks, retrieve top episodic lessons + semantic facts"
    ],
    actions=[
        {"type": "analysis", "detail": "Identified recurring failure: missing installs and version mismatches."},
        {"type": "action", "detail": "Added pip install block + minimal fallbacks."},
        {"type": "action", "detail": "Added memory policy: pin constraints, drop low-salience items."}
    ],
    outcome="Notebook became robust: runs with or without external keys; troubleshooting quality improved with episodic lessons.",
    outcome_score=0.90,
    lessons=[
        "Always include a pip install cell for non-standard deps.",
        "Pin hard constraints (e.g., offline fallback) into long-term memory.",
        "Store a post-task 'lesson list' as an episodic trace for reuse."
    ],
    failure_modes=[
        "Assuming an API key exists and crashing when absent.",
        "Storing too much noise into long-term memory causing irrelevant recall context."
    ],
    tags=["colab", "robustness", "memory"]
)


print("✅ Memory engine initialized.")
print(f"   LTM items: {len(mem.ltm)} | Episodes: {len(mem.episodes)} | ST items: {len(mem.short_term)}")


q1 = "I want to build memory for an agent in Colab. What should I store and how do I retrieve it?"
out1 = agent.answer(q1)
print("\n" + "="*90)
print("Q1 ANSWER\n")
print(out1["answer"][:1800])


q2 = "How do I avoid my agent repeating the same memory over and over?"
out2 = agent.answer(q2)
print("\n" + "="*90)
print("Q2 ANSWER\n")
print(out2["answer"][:1800])


def simple_outcome_eval(text: str) -> float:
    # Crude self-evaluation: fraction of memory-policy keywords the answer mentions.
    hits = 0
    for kw in ["decay", "usage", "penalty", "novelty", "prune", "retrieve", "episodic", "semantic"]:
        if kw in text.lower():
            hits += 1
    return float(np.clip(hits / 8.0, 0.0, 1.0))


score2 = simple_outcome_eval(out2["answer"])
mem.episode_add(
    task="Prevent repetitive recall in a memory-augmented agent",
    constraints={"must_be_simple": True, "runs_in_colab": True},
    plan=[
        "Track usage counts per memory item",
        "Apply usage-based penalty during ranking",
        "Boost novelty during storage to reduce duplicates",
        "Optionally prune low-salience memories"
    ],
    actions=[
        {"type": "design", "detail": "Added usage-based penalty 1/(1+alpha*usage)."},
        {"type": "design", "detail": "Used novelty = 1 - max_similarity at store time."}
    ],
    outcome=out2["answer"][:600],
    outcome_score=score2,
    lessons=[
        "Penalize overused memories during ranking (usage decay).",
        "Enforce novelty threshold at storage time to prevent duplicates.",
        "Keep episodic lessons distilled to avoid bloated recall context."
    ],
    failure_modes=[
        "No usage tracking, causing one high-similarity memory to dominate forever.",
        "Storing raw chat logs as LTM instead of distilled summaries."
    ],
    tags=["ranking", "decay", "policy"]
)
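The two design actions above name concrete formulas: a usage-based ranking penalty and a storage-time novelty score. Here is a minimal sketch of both, assuming cosine similarity over embedding vectors and a hypothetical alpha (the real implementations live inside MemoryEngine):

def usage_penalty(score: float, usage: int, alpha: float = 0.2) -> float:
    # Down-rank overused memories: score / (1 + alpha * usage).
    return score / (1.0 + alpha * usage)

def novelty(candidate: np.ndarray, stored: np.ndarray) -> float:
    # Novelty = 1 - max cosine similarity against existing memories; near-duplicates score ~0.
    if stored.size == 0:
        return 1.0
    sims = stored @ candidate / (np.linalg.norm(stored, axis=1) * np.linalg.norm(candidate) + 1e-9)
    return float(1.0 - sims.max())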


cons = mem.consolidate()
print("\n" + "="*90)
print("CONSOLIDATION RESULT:", cons)


print("\n" + "="*90)
print("LTM (top rows):")
display(mem.ltm_df().head(12))


print("\n" + "="*90)
print("EPISODES (top rows):")
display(mem.episodes_df().head(12))


def debug_retrieval(query: str):
    # Show what the engine retrieves for a query: the built context plus scored hits.
    pack = mem.retrieve(query)
    ctx = mem.build_context(query, pack)
    sem = []
    for mid, sc in pack["semantic_scored"]:
        it = mem.ltm[mid]
        sem.append({"mem_id": mid, "score": sc, "kind": it.kind, "salience": it.salience, "usage": it.usage, "text": it.text[:160]})
    ep = []
    for eid, sc in pack["episodic_scored"]:
        e = mem.episodes[eid]
        # Dict keys here are assumed; adjust to your episode schema.
        ep.append({"episode_id": eid, "score": sc, "task": e.task, "lessons": " | ".join(e.lessons[:4])})
    return ctx, pd.DataFrame(sem), pd.DataFrame(ep)


print("\n" + "="*90)
ctx, sem_df, ep_df = debug_retrieval("How do I design an agent memory policy for storage and retrieval?")
print(ctx[:1600])
print("\nTop semantic hits:")
display(sem_df)
print("\nTop episodic hits:")
display(ep_df)


print("\n✅ Done. You now have working short-term, long-term vector, and episodic memory with storage/retrieval policies in a single Colab snippet.")
