<?xml version="1.0" encoding="utf-8"?>
<oembed>
  <version>1.0</version>
  <type>rich</type>
  <provider_name>Libsyn</provider_name>
  <provider_url>https://www.libsyn.com</provider_url>
  <height>90</height>
  <width>600</width>
  <title>Optimising for Trouble – Game Theory and AI Safety | with Jobst Heitzig</title>
  <description>What happens when an AI system faithfully follows a flawed goal? In this episode, we explore how even well-designed algorithms can produce dangerous outcomes, from amplifying hate speech to mismanaging infrastructure, simply by optimising a reward function which, like all reward functions, fails to encode all that matters. We discuss the hidden risks of reinforcement learning, why over-optimisation can backfire, and how game theory helps us rethink what it means for AI to act &quot;rationally&quot; in complex, real-world environments. Jobst Heitzig is a mathematician at the Potsdam Institute for Climate Impact Research and an expert in AI safety and decision design.</description>
  <author_name>Game Changer - the game theory podcast</author_name>
  <author_url>http://tws-gamechanger.libsyn.com/website</author_url>
  <html>&lt;iframe title="Libsyn Player" style="border: none" src="//html5-player.libsyn.com/embed/episode/id/40135170/height/90/theme/custom/thumbnail/yes/direction/forward/render-playlist/no/custom-color/88AA3C/" height="90" width="600" scrolling="no" allowfullscreen webkitallowfullscreen mozallowfullscreen oallowfullscreen msallowfullscreen&gt;&lt;/iframe&gt;</html>
  <thumbnail_url>https://assets.libsyn.com/secure/content/198644825</thumbnail_url>
</oembed>
