I can't get Bayesian Linear Regression to work with TensorFlow Probability. Here's my code:
!pip install tensorflow==2.0.0-rc1!pip install tensorflow-probability==0.8.0rc0import numpy as npimport tensorflow as tfimport tensorflow_probability as tfptfd = tfp.distributionsN = 20std = 1m = np.random.normal(0, scale=5, size=1).astype(np.float32)b = np.random.normal(0, scale=5, size=1).astype(np.float32)x = np.linspace(0, 100, N).astype(np.float32)y = m*x+b+ np.random.normal(loc=0, scale=std, size=N).astype(np.float32)num_results = 10000num_burnin_steps = 5000def joint_log_prob(x, y, m, b, std): rv_m = tfd.Normal(loc=0, scale=5) rv_b = tfd.Normal(loc=0, scale=5) rv_std = tfd.HalfCauchy(loc=0., scale=2.) y_mu = m*x+b rv_y = tfd.Normal(loc=y_mu, scale=std) return (rv_m.log_prob(m) + rv_b.log_prob(b) + rv_std.log_prob(std)+ tf.reduce_sum(rv_y.log_prob(y)))# Define a closure over our joint_log_prob.def target_log_prob_fn(m, b, std): return joint_log_prob(x, y, m, b, std)@tf.function(autograph=False)def do_sampling(): kernel=tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=target_log_prob_fn, step_size=0.05, num_leapfrog_steps=3) kernel = tfp.mcmc.SimpleStepSizeAdaptation( inner_kernel=kernel, num_adaptation_steps=int(num_burnin_steps * 0.8)) return tfp.mcmc.sample_chain( num_results=num_results, num_burnin_steps=num_burnin_steps, current_state=[ 0.01 * tf.ones([], name='init_m', dtype=tf.float32), 0.01 * tf.ones([], name='init_b', dtype=tf.float32), 1 * tf.ones([], name='init_std', dtype=tf.float32) ], kernel=kernel, trace_fn=lambda _, pkr: [pkr.inner_results.accepted_results.step_size, pkr.inner_results.log_accept_ratio])samples, [step_size, log_accept_ratio] = do_sampling()m_posterior, b_posterior, std_posterior = samplesp_accept = tf.reduce_mean(tf.exp(tf.minimum(log_accept_ratio, 0.)))print('Acceptance rate: {}'.format(p_accept))n_v = len(samples)true_values = [m, b, std]plt.figure()plt.title('Training data')plt.plot(x, y)plt.figure()plt.title('Visualizing trace and posterior distributions')for i, (sample, true_value) in enumerate(zip(samples, true_values)): 
plt.subplot(2*n_v, 2, 2*i+1) plt.plot(sample) plt.subplot(2*n_v, 2, 2*i+2) plt.hist(sample) plt.axvline(x=true_value)>>> Acceptance rate: 0.006775229703634977
Any ideas?