working code #71

Open
kirillchertoganov opened this issue Nov 8, 2022 · 2 comments
Comments

@kirillchertoganov

Can you please publish code that works?

I have this error:

('Failed to import pydot. You must pip install pydot and install graphviz (https://graphviz.gitlab.io/download/), ', 'for pydotprint to work.')

ITERATION NUMBER 1
BEST PLAYER VERSION 0
SELF PLAYING 30 EPISODES...
1


UnimplementedError Traceback (most recent call last)
in
63 ######## SELF PLAY ########
64 print('SELF PLAYING ' + str(config.EPISODES) + ' EPISODES...')
---> 65 _, memory, _, _ = playMatches(best_player, best_player, config.EPISODES, lg.logger_main, turns_until_tau0 = config.TURNS_UNTIL_TAU0, memory = memory)
66 print('\n')
67

~\Desktop\finantial risk\DeepReinforcementLearning-master\funcs.py in playMatches(player1, player2, EPISODES, logger, turns_until_tau0, memory, goes_first)
84 #### Run the MCTS algo and return an action
85 if turn < turns_until_tau0:
---> 86 action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 1)
87 else:
88 action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 0)

~\Desktop\finantial risk\DeepReinforcementLearning-master\agent.py in act(self, state, tau)
84 lg.logger_mcts.info('****** SIMULATION %d ', sim + 1)
85 lg.logger_mcts.info('*********************')
---> 86 self.simulate()
87
88 #### get action values

~\Desktop\finantial risk\DeepReinforcementLearning-master\agent.py in simulate(self)
66
67 ##### EVALUATE THE LEAF NODE
---> 68 value, breadcrumbs = self.evaluateLeaf(leaf, value, done, breadcrumbs)
69
70 ##### BACKFILL THE VALUE THROUGH THE TREE

~\Desktop\finantial risk\DeepReinforcementLearning-master\agent.py in evaluateLeaf(self, leaf, value, done, breadcrumbs)
134 if done == 0:
135
--> 136 value, probs, allowedActions = self.get_preds(leaf.state)
137 lg.logger_mcts.info('PREDICTED VALUE FOR %d: %f', leaf.state.playerTurn, value)
138

~\Desktop\finantial risk\DeepReinforcementLearning-master\agent.py in get_preds(self, state)
108 inputToModel = np.array([self.model.convertToModelInput(state)])
109
--> 110 preds = self.model.predict(inputToModel)
111 value_array = preds[0]
112 logits_array = preds[1]

~\Desktop\finantial risk\DeepReinforcementLearning-master\model.py in predict(self, x)
28
29 def predict(self, x):
---> 30 return self.model.predict(x)
31
32 def fit(self, states, targets, epochs, verbose, validation_split, batch_size):

~\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
1627 for step in data_handler.steps():
1628 callbacks.on_predict_batch_begin(step)
-> 1629 tmp_batch_outputs = self.predict_function(iterator)
1630 if data_handler.should_sync:
1631 context.async_wait()

~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in call(self, *args, **kwds)
826 tracing_count = self.experimental_get_tracing_count()
827 with trace.Trace(self._name) as tm:
--> 828 result = self._call(*args, **kwds)
829 compiler = "xla" if self._experimental_compile else "nonXla"
830 new_tracing_count = self.experimental_get_tracing_count()

~\anaconda3\lib\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
892 *args, **kwds)
893 # If we did not create any variables the trace we have is good enough.
--> 894 return self._concrete_stateful_fn._call_flat(
895 filtered_flat_args, self._concrete_stateful_fn.captured_inputs) # pylint: disable=protected-access
896

~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in _call_flat(self, args, captured_inputs, cancellation_manager)
1916 and executing_eagerly):
1917 # No tape is watching; skip to running the function.
-> 1918 return self._build_call_outputs(self._inference_function.call(
1919 ctx, args, cancellation_manager=cancellation_manager))
1920 forward_backward = self._select_forward_and_backward_functions(

~\anaconda3\lib\site-packages\tensorflow\python\eager\function.py in call(self, ctx, args, cancellation_manager)
553 with _InterpolateFunctionError(self):
554 if cancellation_manager is None:
--> 555 outputs = execute.execute(
556 str(self.signature.name),
557 num_outputs=self._num_outputs,

~\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
57 try:
58 ctx.ensure_initialized()
---> 59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:

UnimplementedError: The Conv2D op currently only supports the NHWC tensor format on the CPU. The op was given the format: NCHW
[[node model_5/conv2d_65/Conv2D (defined at C:\Users\Admin\Desktop\finantial risk\DeepReinforcementLearning-master\model.py:30) ]] [Op:__inference_predict_function_6283]

Function call stack:
predict_function
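
For reference: the UnimplementedError means the model's Conv2D layers were built with the channels_first (NCHW) data format, which TensorFlow's CPU kernels do not implement; only channels_last (NHWC) is supported on CPU. A minimal, untested sketch of one possible workaround is below, assuming the model is built through the standard tf.keras API (the transpose line is an assumption about the input layout, not code from this repo):

import numpy as np
import tensorflow as tf

# Force Keras to build Conv2D layers in NHWC (channels_last), the only
# format the CPU Conv2D kernel implements.
tf.keras.backend.set_image_data_format('channels_last')

# If model.py passes data_format='channels_first' to its Conv2D layers
# explicitly, those arguments would also need to become 'channels_last',
# and any NCHW-shaped inputs transposed before prediction, e.g.:
# x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))  # (batch, C, H, W) -> (batch, H, W, C)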

@archcra

archcra commented Nov 8, 2022 via email

@kirillchertoganov
Author

It would be great if you could provide a Docker container.
