
Expected input batch_size (24) to match target batch_size (18)

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Edge_GCNConv and convert_to_one_hot are my own helpers (not shown here).

class GCNModel(nn.Module):
    def __init__(self, in_channels, hidden_dim, out_channels, edge_dim):  # 5, 64, 6, 3
        super(GCNModel, self).__init__()
        self.conv1 = Edge_GCNConv(in_channels=in_channels, out_channels=hidden_dim, edge_dim=edge_dim)
        self.conv2 = Edge_GCNConv(in_channels=hidden_dim, out_channels=out_channels, edge_dim=edge_dim)
        self.batch_norm1 = nn.BatchNorm1d(hidden_dim)
        self.batch_norm2 = nn.BatchNorm1d(out_channels)
        self.linear = nn.Linear(out_channels, out_channels)

    def forward(self, x, edge_index, edge_attr):
        x1 = self.conv1(x, edge_index, edge_attr)
        x1 = self.batch_norm1(x1)
        x1 = F.relu(x1)
        x1 = F.dropout(x1, p=0.1, training=self.training)
        # print("GCNModel x1.shape:", x1.shape)  # (24, 64)
        x2 = self.conv2(x1, edge_index, edge_attr)
        x2 = self.batch_norm2(x2)
        x2 = F.relu(x2)
        # print("GCNModel x2.shape:", x2.shape)  # (24, 6)
        x2 = F.dropout(x2, p=0.1, training=self.training)
        out = self.linear(x2)
        print("GCNModel out:", out)
        print("GCNModel out.shape:", out.shape)  # (24, 6)
        return out


def train_model(train_loader, val_loader, model, optimizer, output_dim, threshold_value,
                num_epochs=100, early_stopping_rounds=50, batch_size=4):
    """Train the GNN model with k-fold cross validation.

    Args:
        train_loader: training data loader
        val_loader: validation data loader
        model: GNN model
        optimizer: optimizer
        num_epochs: number of training epochs (default: 100)
        early_stopping_rounds: early-stopping patience (default: 50)
    """
    best_val_loss = float('inf')
    best_accuracy = 0  # track best validation accuracy
    rounds_without_improvement = 0

    # loss function
    criterion = nn.CrossEntropyLoss()
    # criterion = nn.BCEWithLogitsLoss()  # binary classification

    for epoch in range(num_epochs):
        model.train()
        total_loss = 0
        correct = 0
        total = 0
        for data in train_loader:
            optimizer.zero_grad()
            #################### error #################
            out = model(data.x, data.edge_index, data.edge_attr)
            # convert data.y to a multi-hot encoding
            one_hot_labels = convert_to_one_hot(data.y, output_dim)
            print("train_model out.shape:", out.shape)                        # (24, 6)
            print("train_model data.y.shape:", data.y.shape)                  # (18, 2)
            print("train_model data.edge_attr.shape:", data.edge_attr.shape)  # (18, 3)
            print("train_model one_hot_labels.shape:", one_hot_labels.shape)  # (18, 6)
            loss = criterion(out, one_hot_labels)
            #################################################
            total_loss += loss.item()
            predicted = (torch.sigmoid(out) >= threshold_value).long()
            correct += (predicted == one_hot_labels).all(dim=1).sum().item()
            total += len(data.y)
            loss.backward()
            optimizer.step()
        avg_train_loss = total_loss / len(train_loader)
        train_accuracy = correct / total
```
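To show the mismatch in isolation, here is a minimal sketch using the shapes from the prints above (the label dtype is my assumption, since convert_to_one_hot is not shown):

```python
import torch
import torch.nn as nn

# Shapes copied from the prints in train_model above (one mini-batch):
out = torch.randn(24, 6)                               # node-level output: one row per node
one_hot_labels = torch.randint(0, 2, (18, 6)).float()  # edge-level labels: one row per edge

criterion = nn.CrossEntropyLoss()
loss = criterion(out, one_hot_labels)  # fails: the first dimensions differ (24 vs 18),
                                       # which is the batch-size mismatch from the title
```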

The shape of out follows from data.x (one row per node in the batch, so 24 rows), while data.y has one row per edge, the same as data.edge_attr (18 rows), and so does one_hot_labels.

What can I do to fix the mismatch between the number of rows in out and in one_hot_labels? Should I modify the model, or should I change the dimensions of its output?
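Is the fix something along these lines, where the node-level output is turned into one row per edge by gathering the two endpoint embeddings with edge_index? This is only a sketch of what I mean (the concatenation and the extra linear layer are my assumptions), not code I have working:

```python
import torch
import torch.nn as nn

class EdgePredictionHead(nn.Module):
    """Sketch: map node-level output (num_nodes, node_dim) to edge-level
    predictions (num_edges, out_channels) using edge_index."""
    def __init__(self, node_dim, out_channels):
        super().__init__()
        self.linear = nn.Linear(2 * node_dim, out_channels)

    def forward(self, node_out, edge_index):
        src, dst = edge_index                                          # each: (num_edges,)
        edge_repr = torch.cat([node_out[src], node_out[dst]], dim=1)   # (num_edges, 2 * node_dim)
        return self.linear(edge_repr)                                  # (num_edges, out_channels)
```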

