Linux websever 5.15.0-153-generic #163-Ubuntu SMP Thu Aug 7 16:37:18 UTC 2025 x86_64
Apache/2.4.52 (Ubuntu)
Server IP : 192.168.3.70 | Your IP : 192.168.1.99
Can't read [ /etc/named.conf ]
PHP : 8.1.2-1ubuntu2.23
urlab
www.github.com/MadExploits
Terminal
AUTO ROOT
Adminer
Backdoor Destroyer
Linux Exploit
Lock Shell
Lock File
Create User
CREATE RDP
PHP Mailer
BACKCONNECT
UNLOCK SHELL
HASH IDENTIFIER
CPANEL RESET
CREATE WP USER
README
+ Create Folder
+ Create File
Path : / var / www / html / meetwize / [ HOME SHELL ]
Name                    Size       Permission
__pycache__             [ DIR ]    drwxrwxrwx
static                  [ DIR ]    drwxrwxrwx
templates               [ DIR ]    drwxrwxrwx
venv                    [ DIR ]    drwxr-xr-x
website                 [ DIR ]    drwxrwxrwx
xyz-main                [ DIR ]    drwxr-xr-x
Meetwise.zip            4.87 MB    -rw-r--r--
app.py                  8.07 KB    -rw-rw-rw-
lstm_model.py           4.63 KB    -rw-rw-rw-
maps.py                 597 B      -rw-rw-rw-
model_state.pth         5.2 MB     -rw-rw-rw-
mydb.db                 84 KB      -rw-rw-rw-
requirements.txt        146 B      -rw-rw-rw-
tempCodeRunnerFile.py   8.88 KB    -rw-rw-rw-
vocab.json              55.22 KB   -rw-rw-rw-
Code Editor : lstm_model.py
# lstm_model.py
import torch
import torch.nn as nn
import numpy as np
import re
import json
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# Ensure necessary NLTK data is downloaded
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')

# Set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# -----------------------------
# Define the LSTM SentimentRNN
# -----------------------------
class SentimentRNN(nn.Module):
    def __init__(self, vocab_size, embedding_dim=128, hidden_dim=256,
                 output_dim=1, n_layers=2, drop_prob=0.5):
        super(SentimentRNN, self).__init__()
        self.n_layers = n_layers
        self.hidden_dim = hidden_dim

        # Embedding layer (+1 to account for the padding index)
        self.embedding = nn.Embedding(vocab_size + 1, embedding_dim, padding_idx=0)

        # LSTM layer
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            batch_first=True, dropout=drop_prob)

        # Dropout layer
        self.dropout = nn.Dropout(0.3)

        # Fully connected layer
        self.fc = nn.Linear(hidden_dim, output_dim)

        # Sigmoid activation for binary classification
        self.sig = nn.Sigmoid()

    def forward(self, x, hidden):
        batch_size = x.size(0)
        embeds = self.embedding(x)
        lstm_out, hidden = self.lstm(embeds, hidden)

        # Take the output from the last time step
        lstm_out = lstm_out[:, -1, :]
        out = self.dropout(lstm_out)
        out = self.fc(out)
        sig_out = self.sig(out)
        return sig_out, hidden

    def init_hidden(self, batch_size):
        weight = next(self.parameters()).data
        hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(device),
                  weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().to(device))
        return hidden

# -----------------------------
# Preprocessing and Utility Functions
# -----------------------------
lemmatizer = WordNetLemmatizer()

def preprocess_text(text):
    """
    Cleans and preprocesses the input text.
    """
    text = str(text).lower()
    text = re.sub(r'<.*?>', '', text)             # strip HTML tags
    text = re.sub(r'http\S+|www\.\S+', '', text)  # strip URLs
    text = re.sub(r'\S+@\S+', '', text)           # strip email addresses
    text = re.sub(r'\d+', '', text)               # strip digits
    text = re.sub(r'[^\w\s]', '', text)           # strip punctuation
    text = re.sub(r'\b\w\b', '', text)            # strip single characters
    text = re.sub(r'\s+', ' ', text).strip()      # collapse whitespace
    tokens = nltk.word_tokenize(text)
    stop_words = set(stopwords.words('english'))
    tokens = [word for word in tokens if word not in stop_words]
    tokens = [lemmatizer.lemmatize(word) for word in tokens]
    return ' '.join(tokens)

def text_to_sequence(text, vocab):
    """
    Converts preprocessed text into a sequence of integers using the
    provided vocabulary.
    """
    return [vocab.get(word, 0) for word in text.split()]

def pad_features(sequence, seq_length):
    """
    Pads or truncates the sequence to a fixed length.
    """
    padded = np.zeros((1, seq_length), dtype=int)
    seq = sequence[:seq_length]
    if len(seq) > 0:
        padded[0, -len(seq):] = np.array(seq)
    return padded

# -----------------------------
# Loading Model and Vocabulary
# -----------------------------
def load_vocab(vocab_path='vocab.json'):
    with open(vocab_path, 'r') as f:
        vocab = json.load(f)
    return vocab

def load_model(model_path='model_state.pth', vocab_path='vocab.json'):
    """
    Loads the trained LSTM model along with the vocabulary and returns them
    together with the sequence length used during training.
    """
    vocab = load_vocab(vocab_path)
    vocab_size = len(vocab)

    # Set parameters as used during training
    embedding_dim = 128
    hidden_dim = 256
    output_dim = 1
    n_layers = 2
    seq_length = 100  # Must match the sequence length used during training

    model = SentimentRNN(vocab_size, embedding_dim, hidden_dim, output_dim, n_layers)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    model.eval()
    return model, vocab, seq_length

def predict_sentiment(text, model, vocab, seq_length):
    """
    Given raw text, preprocesses it, converts to tensor, runs it through
    the model, and returns a binary sentiment prediction (0 or 1).
    """
    processed = preprocess_text(text)
    seq = text_to_sequence(processed, vocab)
    padded_seq = pad_features(seq, seq_length)
    tensor_seq = torch.from_numpy(padded_seq).to(device, dtype=torch.long)
    h = model.init_hidden(1)
    with torch.no_grad():
        output, h = model(tensor_seq, h)
    prediction = torch.round(output.squeeze()).item()
    return int(prediction)