rnn.py

#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as tudata
import numpy as np
import sys
import json
import os
# Write the slices out as JSON files so they can be inspected in Tiled. The input may be a
# single layer or several layers; with several layers they are ordered from low to high
# (these are layout levels, not visual levels).
def output_to_json(slices, suffix="ex"):
    with open("/Users/xulianxin/Documents/develop/game/TileMatch/TileManor.Lv/TileManor/templates/tm_0000.json", 'r') as f:
        json_temp = json.load(f)
    for i in range(0, len(slices)):
        with open("/Users/xulianxin/Documents/develop/game/TileMatch/TileManor.Lv/TileManor/slices/slice_%d_%s.json" % (i, suffix), "w") as out_f:
            json_temp["layers"][0]["data"] = slices[i]
            json.dump(json_temp, out_f)
# Parse a level file and return the data of each layer, split by visual layer.
def get_each_view_slice(jsonLv):
    jsonData = ""
    tilesByPos = {}
    maxZ = 0
    # Process the tile data of every layout layer
    with open(jsonLv, 'r') as f:  # , encoding='utf-8'
        jsonData = json.load(f)
    # Read the board width/height
    w = int(jsonData['width'])
    h = int(jsonData['height'])
    for item in jsonData['layers']:
        name = item['name']
        if not name.startswith('Tile_'):
            # stacked
            continue
        z = int(name[5:])
        data = item['data']
        for i in range(0, len(data)):
            if data[i] == 0:
                continue
            tile_x = i % w
            tile_y = i // w  # row index (the original divided by h, which only works for square boards)
            tile_z = z
            # Bookkeeping
            tilesByPos[(tile_x, tile_y, tile_z)] = [1, 0]  # [tile type, zView -- visual layer, defaults to 0]
            if tile_z > maxZ:
                maxZ = tile_z
    # Using the layout information above, compute for every tile:
    # 1. its visual layer (unlike the layout z above, tiles with the same layout z can sit on different visual layers)
    # Starting from maxZ and working downward: for every position that holds a tile, compute that tile's visual layer.
    # If there is a tile above it (anywhere from layer z+1 up to maxZ), its visual layer is that tile's visual layer + 1.
    slice_by_view_z = [[0] * w * h for i in range(maxZ + 1)]
    for z in range(0, maxZ + 1):
        z = maxZ - z
        for x in range(0, w):
            for y in range(0, h):
                if (x, y, z) in tilesByPos:
                    tile = tilesByPos[(x, y, z)]
                    adjs = [(x, y), (x, y + 1), (x, y - 1), (x + 1, y), (x + 1, y + 1), (x + 1, y - 1), (x - 1, y), (x - 1, y - 1), (x - 1, y + 1)]
                    # Determine the visual layer
                    zvMax = -1
                    for zup in range(z + 1, maxZ + 1):
                        for adj in adjs:
                            if (adj[0], adj[1], zup) in tilesByPos:
                                zv = tilesByPos[(adj[0], adj[1], zup)][1]
                                if zv > zvMax:
                                    zvMax = zv
                    zv = zvMax + 1
                    tilesByPos[(x, y, z)][1] = zv
                    # Record the result into slice_by_view_z
                    slice_by_view_z[zv][y * w + x] = tilesByPos[(x, y, z)][0]
    return slice_by_view_z
# Quick check that the visual-layer slices are extracted correctly
def test_get_each_view_slice():
    jsonLv = "/Users/xulianxin/Documents/develop/game/TileMatch/TileManor.Lv/TileManor/templates/tm_0016.json"
    slice_by_view_z = get_each_view_slice(jsonLv)
    output_to_json(slice_by_view_z)
##########################################
# PyTorch RNN
class TileMatchRNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(TileMatchRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        # Check whether a GPU is available
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, x):
        # Add a batch dimension if necessary
        if x.dim() == 2:
            x = x.unsqueeze(0)
        out, h = self.rnn(x)
        out = self.fc(out[:, -1, :])
        return out, h
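# A minimal sketch (not part of the original script) of how the RNN variant could generate
# several layers autoregressively, feeding each thresholded prediction back as the next input.
# Assumptions: `rnn_model` is a trained TileMatchRNN, `first_layer` is a flat 0/1 list of
# length input_size, and the names and default threshold here are illustrative only.
def generate_layers_with_rnn(rnn_model, first_layer, num_generated=6, threshold=0.9):
    rnn_model.eval()
    layers = []
    x = torch.tensor(first_layer, dtype=torch.float32).unsqueeze(0).unsqueeze(0)  # (1, 1, input_size)
    with torch.no_grad():
        for _ in range(num_generated):
            logits, _ = rnn_model(x)                            # (1, output_size)
            layer = (torch.sigmoid(logits) >= threshold).int()  # binarize the prediction
            layers.append([int(v) for v in layer[0]])
            x = layer.float().unsqueeze(0)                      # feed the prediction back in
    return layers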
# Fully connected network
class TileMatchFullyConnectedNetwork(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(TileMatchFullyConnectedNetwork, self).__init__()
        self.layer1 = nn.Linear(input_size, hidden_size)
        self.layer2 = nn.Linear(hidden_size, output_size)
        # Check whether a GPU is available
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, x):
        x = torch.relu(self.layer1(x))
        x = self.layer2(x)
        return x
# Variational autoencoder
class TileMatchVAE(nn.Module):
    def __init__(self, input_size, hidden_size, latent_size):
        super(TileMatchVAE, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, latent_size * 2)  # mean and log-variance for each latent variable
        )
        self.decoder = nn.Sequential(
            nn.Linear(latent_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, input_size),
            nn.Sigmoid()  # outputs in the range [0, 1]
        )

    def reparameterize(self, mu, logvar):
        # Sample z = mu + sigma * eps with eps ~ N(0, I), keeping sampling differentiable
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def forward(self, x):
        h = self.encoder(x)
        mu, logvar = h.chunk(2, dim=1)
        z = self.reparameterize(mu, logvar)
        return self.decoder(z), mu, logvar
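# A minimal sketch of the standard VAE objective that train_NN() below computes inline,
# pulled out as a helper for clarity. Assumption: x_recon has already passed through the
# decoder's Sigmoid, so plain binary cross-entropy applies; this helper is not called by
# the original code.
def vae_loss(x_recon, x, mu, logvar):
    # Reconstruction term: per-cell binary cross-entropy, summed over the whole layer
    recon_loss = F.binary_cross_entropy(x_recon, x, reduction='sum')
    # KL divergence between the approximate posterior N(mu, sigma^2) and the prior N(0, I)
    kl_div = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return recon_loss + kl_div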
class TileMatchDataset(tudata.Dataset):
    def __init__(self, directories):
        self.data = []
        for directory in directories:
            for filename in os.listdir(directory):
                filepath = os.path.join(directory, filename)
                if os.path.isfile(filepath) and filepath.endswith('.json'):
                    slices = get_each_view_slice(filepath)
                    if len(slices) > 1:
                        for i in range(0, len(slices) - 1):
                            self.data.append([slices[i], slices[i + 1]])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        input_sequence = self.data[idx][0]
        target_sequence = self.data[idx][1]
        return torch.tensor(input_sequence, dtype=torch.float32), torch.tensor(target_sequence, dtype=torch.float32)
# Model hyperparameters
input_size = 900
hidden_size = 50
num_layers = 2
output_size = 900
def train_NN():
    # Initialize model, loss function, optimizer
    # model = TileMatchRNN(input_size, hidden_size, num_layers, output_size)
    # criterion = nn.BCEWithLogitsLoss()  # suited to per-cell binary targets
    # optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    model = TileMatchVAE(input_size, 500, 20)
    criterion = nn.BCEWithLogitsLoss()  # suited to per-cell binary targets (unused below; the VAE loss is computed directly)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
    # Prepare the dataset
    batch_size = 1
    directories = ["/Users/xulianxin/Documents/develop/game/TileMatch/TileManor.Lv/TileManor/tf_templates",]
    dataset = TileMatchDataset(directories)
    train_loader = tudata.DataLoader(dataset, batch_size=batch_size, shuffle=True)
    num_epochs = 10
    for epoch in range(num_epochs):
        for data in train_loader:  # train_loader provides (input, target) pairs
            inputs, targets = data
            # outputs, _ = model(inputs)
            x_recon, mu, logvar = model(inputs)
            recon_loss = F.binary_cross_entropy(x_recon, inputs, reduction='sum')
            kl_div = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
            loss = recon_loss + kl_div
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    return model
if __name__ == '__main__':
    # test_get_each_view_slice()
    n = train_NN()
    torch.save(n, '/Users/xulianxin/Documents/develop/game/TileMatch/TileManor.Lv/TileManor/nn_VAE.model')
    # n = torch.load('/Users/xulianxin/Documents/develop/game/TileMatch/TileManor.Lv/TileManor/nn_VAE.model')
    slices = get_each_view_slice("/Users/xulianxin/Documents/develop/game/TileMatch/TileManor.Lv/TileManor/templates/tm_0015.json")
    n.eval()
    # Prepare the input data
    initial_input = torch.tensor(slices[0], dtype=torch.float32).unsqueeze(0)  # (1, 900)
    # Initialize the hidden state (only needed for the RNN path)
    h0 = torch.zeros(num_layers, initial_input.size(0), hidden_size)
    # Use the model to generate layers
    slices = []
    for i in range(1):
        with torch.no_grad():  # no gradients needed during evaluation
            # output, hn = n(initial_input)
            output, mu, logvar = n(initial_input)
            initial_input = output
            # Apply a threshold to turn the floats into 0 or 1; the decoder already ends
            # in Sigmoid, so threshold its output directly
            threshold = 0.9
            output_layout = (output >= threshold).int()
            # Convert output_layout into a plain list
            slices.append([int(d) for d in output_layout[0]])
    output_to_json(slices, "nn_vae")