• R/O
  • HTTP
  • SSH
  • HTTPS

提交

標籤
無標籤

Frequently used words (click to add to your profile)

java, c++, android, linux, c#, windows, objective-c, cocoa, 誰得, qt, python, php, ruby, game, gui, bathyscaphe, c, 計画中 (planning stage), 翻訳, omegat, framework, twitter, dom, test, vb.net, directx, ゲームエンジン, btron, arduino, previewer

Emergent generative agents


Commit MetaInfo

修訂: 3c803c8de0d04c3fb45e9e0455f8ab2111e59206 (tree)
時間: 2023-04-15 00:16:39
作者: Corbin <cds@corb...>
Committer: Corbin

Log Message

Fix some newlines, and experiment with thoughts.

Trying to avoid picking exponentially-long chains of thought by
preferring shorter thoughts. First, select relevant thoughts; then,
select shortest thoughts.

Change Summary

差異

--- a/agent.py
+++ b/agent.py
@@ -3,6 +3,7 @@
33
44 from concurrent.futures import ThreadPoolExecutor
55 from datetime import datetime
6+from heapq import nsmallest
67 import json
78 import os.path
89 import random
@@ -37,10 +38,10 @@ def build_prompt(persona, shadow, flavorText="", selfAware=False,
3738 return f"""Character Persona: {titledName} ({personaTraits})
3839
3940 Character Shadow Persona: {shadow[0]} ({shadowTraits})
40-
41-Flavor text: {flavorText}
4241 """, titledName
4342
43+# Flavor text: {flavorText}
44+
4445 def load_character(path):
4546 with open(os.path.join(path, "character.json"), "r") as handle:
4647 return json.load(handle)
@@ -60,7 +61,7 @@ class SentenceIndex:
6061 with open(self.path, "w") as f: json.dump(dict(self.db), f)
6162
6263 def search(self, embedding, k):
63- with Timer("k nearest neighbors"):
64+ with Timer("%d nearest neighbors" % k):
6465 D, I = self.index.search(np.array([embedding], dtype="float32"), k)
6566 return [self.db[i][0] for i in I[0] if i >= 0]
6667
@@ -140,7 +141,12 @@ class Agent(SingleServerIRCBot):
140141
141142 def thoughtPrompt(self):
142143 key = NO_THOUGHTS_EMBED if self.recent_thought is None else self.recent_thought[1]
143- new_thoughts = thought_index.search(key, 4)
144+ # Fetch more thoughts than necessary, and always prefer shorter
145+ # thoughts. This is an attempt to prevent exponential rumination.
146+ new_thoughts = thought_index.search(key, 10)
147+ # .search() returns most relevant thoughts first; reversing the list
148+ # creates more focused chains of thought.
149+ new_thoughts = nsmallest(5, new_thoughts.reverse(), key=len)
144150 if self.recent_thought is not None:
145151 new_thoughts.append(self.recent_thought[0])
146152 print("~ Thoughts:", *new_thoughts)
@@ -148,8 +154,9 @@ class Agent(SingleServerIRCBot):
148154
149155 def newThoughtPrompt(self, channel):
150156 lines = self.logs[channel].l[-10:]
151- return (self.thoughtPrompt() + self.chatPrompt(channel) +
152- "\n".join(lines) + "\nEND OF LOG\n")
157+ return (self.chatPrompt(channel) + "\n" +
158+ "\n".join(lines) + "\nEND OF LOG\n" +
159+ self.thoughtPrompt())
153160
154161 def chatPrompt(self, channel):
155162 c = self.channels[channel]
@@ -178,7 +185,7 @@ Users: {users}"""
178185 prefix = f"{datetime.now():%H:%M:%S} <{nick}>"
179186 examples = self.examplesFromOtherChannels(channel)
180187 # NB: "full" prompt needs log lines from current channel...
181- fullPrompt = prompt + self.thoughtPrompt() + examples + self.chatPrompt(channel)
188+ fullPrompt = prompt + self.thoughtPrompt() + examples + self.chatPrompt(channel) + "\n"
182189 # ...but we need to adjust the log offset first...
183190 log.bumpCutoff(max_context_length, gen.countTokens, fullPrompt, prefix)
184191 # ...and current channel's log lines are added here.
@@ -196,9 +203,7 @@ Users: {users}"""
196203
197204 def thinkAbout(self, channel):
198205 print("~ Will ponder channel:", channel)
199- thoughtPrompt = prompt + self.newThoughtPrompt(channel)
200- prefix = "New thought:"
201- s = thoughtPrompt + prefix
206+ s = prompt + self.newThoughtPrompt(channel)
202207 def cb(completion):
203208 self.thinking = False
204209 thought = breakIRCLine(completion.result())
--- a/append_thought.py
+++ b/append_thought.py
@@ -9,6 +9,7 @@ path = sys.argv[-1]
99 gen = CamelidGen()
1010
1111 with open(path, "r") as handle: db = json.load(handle)
12+print("Thought database:", len(db), "entries")
1213
1314 while True:
1415 try: thought = input("> ").strip()
@@ -16,4 +17,5 @@ while True:
1617 if not thought: break
1718 db[thought] = gen.embed(thought)
1819
20+print("Saving thought database:", len(db), "entries")
1921 with open(path, "w") as handle: json.dump(db, handle)
--- a/common.py
+++ b/common.py
@@ -29,7 +29,7 @@ class Log:
2929 return self.finishPromptAtCutoff(self.cutoff, s, prefix)
3030
3131 def finishPromptAtCutoff(self, cutoff, s, prefix):
32- return s + "\n".join(self.l[cutoff:]) + prefix
32+ return s + "\n".join(self.l[cutoff:]) + "\n" + prefix
3333
3434 def undo(self): self.l.pop()
3535