LumenScopeAI BrainTransformers


The git clone includes a custom transformers package. In BrainTransformers-SNN-LLM/transformers/models/braingpt/configuration_braingpt.py, add the following import:

from transformers import PretrainedConfig
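
For orientation, the import sits at the top of that file, above the configuration class. The sketch below is only illustrative: the class name BrainGPTConfig and its contents are assumptions based on the braingpt module name, and the real file defines much more; the only actual change is the import line.

# configuration_braingpt.py -- top of file (sketch; only the import is the real edit)
from transformers import PretrainedConfig

class BrainGPTConfig(PretrainedConfig):  # assumed class name
    model_type = "braingpt"              # assumed value

    def __init__(self, **kwargs):
        super().__init__(**kwargs)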

Clone the repository, create a virtual environment, install the dependencies, and download the model weights with git-lfs:

git clone https://github.com/LumenScopeAI/BrainTransformers-SNN-LLM.git
cd BrainTransformers-SNN-LLM/
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
sudo apt-get install git-lfs
git lfs install
git clone https://huggingface.co/LumenscopeAI/BrainTransformers-3B-Chat
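
Before editing anything, it is worth checking that Python resolves the bundled transformers package and that git-lfs actually pulled the weights rather than leaving pointer stubs. The snippet below is a quick sanity check, not part of the upstream repo; it assumes it is run from the repository root with the venv active.

# sanity_check.py -- run from BrainTransformers-SNN-LLM/ with the venv active
import os
import transformers

# If the bundled package is the one on the import path, this prints a path inside
# the repository; a site-packages path means the stock library is being used.
print("transformers loaded from:", transformers.__file__)

# The cloned weights folder should contain real model files, not LFS pointer stubs.
weights_dir = "BrainTransformers-3B-Chat"
files = os.listdir(weights_dir) if os.path.isdir(weights_dir) else []
print("weight files found:", [f for f in files if f.endswith((".safetensors", ".bin"))])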

In BrainTransformers-SNN-LLM/transformers/models/braingpt/modeling_braingpt.py, update load_silu_approximator so that the torch.load calls pass weights_only=True:

def load_silu_approximator(device, dtype):
    # Build the SiLU approximator and move it to the model's device and dtype.
    act_fn = SiLUApproximator().to(device).to(dtype)
    pos_checkpoint = os.path.join(os.path.dirname(__file__), 'model_pos.pth')
    neg_checkpoint = os.path.join(os.path.dirname(__file__), 'model_neg.pth')
    if os.path.exists(pos_checkpoint) and os.path.exists(neg_checkpoint):
        # weights_only=True restricts torch.load to plain tensor data.
        # The stock calls omitted it:
        #   torch.load(pos_checkpoint, map_location=device)
        #   torch.load(neg_checkpoint, map_location=device)
        act_fn.pos_model.load_state_dict(
            torch.load(pos_checkpoint, map_location=device, weights_only=True)
        )
        act_fn.neg_model.load_state_dict(
            torch.load(neg_checkpoint, map_location=device, weights_only=True)
        )
    else:
        raise FileNotFoundError(
            f"SiLUApproximator parameters not found at {pos_checkpoint} and {neg_checkpoint}"
        )
    return act_fn
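
For reference, a standalone call looks like the sketch below. It is illustrative only: it assumes model_pos.pth and model_neg.pth sit next to modeling_braingpt.py and that SiLUApproximator behaves like an ordinary nn.Module; inside the repo the function is called with the model's own device and dtype.

# Illustrative use of load_silu_approximator (not part of the upstream code)
import torch

act_fn = load_silu_approximator(torch.device("cpu"), torch.float32)
x = torch.randn(4, 8)
y = act_fn(x)      # approximated SiLU activation, same shape as x
print(y.shape)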

Open run.py and point model_path at the downloaded BrainTransformers-3B-Chat directory:

import torch
from transformers import AutoTokenizer, BrainGPTForCausalLM
from tqdm import tqdm
from torch.utils.data import DataLoader

# Define the model and tokenizer path
model_path = "BrainTransformers-3B-Chat"

# Load the model and tokenizer
print("Loading model and tokenizer...")
model = BrainGPTForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Move the model to GPU (if available)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
print(f"Model loaded on {device}")

# Define a function to generate text
def generate_text(messages, max_new_tokens=200, temperature=0.7, top_k=50, top_p=0.95, do_sample=True):
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    with torch.no_grad():
        generated_ids = model.generate(
            **model_inputs,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            do_sample=do_sample,
            pad_token_id=tokenizer.eos_token_id
        )
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response

# Test the generation function
def test_generation(test_prompts):
    print("\nTesting text generation:")
    for prompt in test_prompts:
        messages = [
            {"role": "system", "content": "You are a knowledgeable assistant."},
            {"role": "user", "content": prompt}
        ]
        print(f"\nPrompt: {prompt}")
        generated = generate_text(messages)
        print(f"Generated: {generated}")

# Define a function to interact with the model
def interact_with_model():
    while True:
        user_input = input("You: ")
        if user_input.lower() in ["exit", "quit"]:
            break
        messages = [
            {"role": "system", "content": "You are a knowledgeable assistant."},
            {"role": "user", "content": user_input}
        ]
        response = generate_text(messages)
        print(f"Assistant: {response}")

test_prompts = [
    "Please explain the Pythagorean theorem!",
    "What is artificial intelligence?",
    "Write a poem about spring.",
    "Explain the basic principles of quantum computing.",
    "How to make a simple pizza?"
]

print("\nTesting text generation before STDP training:")
test_generation(test_prompts)

# Interact with the model
print("\nYou can now interact with the model. Type 'exit' or 'quit' to stop.")
interact_with_model()
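
With the venv active and the edits above in place, run the script from the repository root with python run.py (assuming that is where run.py lives). The sampling parameters of generate_text can also be overridden per call; the values below are just illustrative:

# Example: a shorter, near-greedy reply using the generate_text defined above
messages = [
    {"role": "system", "content": "You are a knowledgeable assistant."},
    {"role": "user", "content": "Summarize the Pythagorean theorem in one sentence."}
]
print(generate_text(messages, max_new_tokens=64, temperature=0.2, do_sample=True))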

  
