@article{caycedoalvarez2017asimov,
  author   = {Caycedo Alvarez, M. and
              Berge, {\O}. S. and
              Berget, A. S. and
              Bj{\o}rknes, E. S. and
              Johnsen, D. V. K. and
              Madsen, F. O. and
              Slavkovik, M.},
  title    = {Implementing {Asimov}'s First Law of Robotics},
  journal  = {Norsk Informatikkonferanse},
  year     = {2017},
  issn     = {1892-0721},
  abstract = {The need to make sure autonomous systems behave ethically is increasing with these systems becoming part of our society. Although there is no consensus to which actions an autonomous system should always be ethically obliged, preventing harm to people is an intuitive first candidate for a principle of behaviour. Do not hurt a human or allow a human to be hurt by your inaction is Asimov's First Law of robotics. We consider the challenges that the implementation of this Law will incur. To unearth these challenges we constructed a simulation of a First Robot Law abiding agent and an accident prone Human. We used a classic two-dimensional grid environment and explored to which extent an agent can be programmed, using standard artificial intelligence methods, to prevent a human from making dangerous actions. We outline the drawbacks of using the Asimov's First Law of robotics as an underlying ethical theory the governs an autonomous system's behaviour.},
}