diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs
index 10f678e0a..6049c95cc 100644
--- a/src/TensorFlowNET.Core/APIs/c_api.cs
+++ b/src/TensorFlowNET.Core/APIs/c_api.cs
@@ -16,6 +16,7 @@ limitations under the License.
 
 using System;
 using System.Runtime.InteropServices;
+using static Tensorflow.CppShapeInferenceResult.Types;
 
 namespace Tensorflow
 {
@@ -50,6 +51,19 @@ public static string StringPiece(IntPtr handle)
             return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle);
         }
 
+        public unsafe static byte[] ByteStringPiece(IntPtr handle)
+        {
+            if (handle == IntPtr.Zero)
+                return Array.Empty<byte>();
+            // Copy the NUL-terminated byte string starting at `handle`, excluding the terminator.
+            byte* str_data = (byte*)handle.ToPointer();
+            List<byte> bytes = new List<byte>();
+            byte current;
+            while ((current = *(str_data++)) != (byte)'\0')
+                bytes.Add(current);
+            return bytes.ToArray();
+        }
+
         [UnmanagedFunctionPointer(CallingConvention.Winapi)]
         public delegate void Deallocator(IntPtr data, IntPtr size, ref DeallocatorArgs args);
 
diff --git a/src/TensorFlowNET.Core/APIs/tf.control_flow.cs b/src/TensorFlowNET.Core/APIs/tf.control_flow.cs
index 239487e05..cd5a71e50 100644
--- a/src/TensorFlowNET.Core/APIs/tf.control_flow.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.control_flow.cs
@@ -46,10 +46,10 @@ public Tensor while_loop(Func<Tensor, Tensor> cond,
             Tensor loop_vars,
             int parallel_iterations = 10)
         {
-            Func<Tensor[], Tensor> cond1 = x
+            Func<Tensors, Tensor> cond1 = x
                 => cond(x[0]);
 
-            Func<Tensor[], Tensor[]> body1 = x
+            Func<Tensors, Tensors> body1 = x
                 => new[] { body(x[0]) };
 
             var results = control_flow_ops.while_loop(cond1,
@@ -58,9 +58,9 @@ public Tensor while_loop(Func<Tensor, Tensor> cond,
             return results[0];
         }
 
-        public Tensor[] while_loop(Func<Tensor[], Tensor> cond,
-            Func<Tensor[], Tensor[]> body,
-            Tensor[] loop_vars,
+        public Tensor[] while_loop(Func<Tensors, Tensor> cond,
+            Func<Tensors, Tensors> body,
+            Tensors loop_vars,
             int parallel_iterations = 10,
             string name = null)
             => control_flow_ops.while_loop(cond, body, loop_vars,
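
With this change both `while_loop` overloads take `Tensors` for the loop state, so the condition and body delegates receive a `Tensors` collection instead of a raw `Tensor[]`. A minimal usage sketch, assuming the library's implicit conversions between `Tensor[]` and `Tensors`:

```csharp
using System;
using Tensorflow;
using static Tensorflow.Binding;

// Count up from 0 while i < 10; the cond/body delegates now operate on `Tensors`.
var i = tf.constant(0);
Func<Tensors, Tensor> cond = x => tf.less(x[0], tf.constant(10));
Func<Tensors, Tensors> body = x => new[] { tf.add(x[0], tf.constant(1)) };

var results = tf.while_loop(cond, body, new Tensors(i));
// results[0] should evaluate to 10.
```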
diff --git a/src/TensorFlowNET.Core/APIs/tf.tensor.cs b/src/TensorFlowNET.Core/APIs/tf.tensor.cs
index be8c2ab24..45aebc0cd 100644
--- a/src/TensorFlowNET.Core/APIs/tf.tensor.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.tensor.cs
@@ -71,15 +71,15 @@ public Tensor strided_slice<T>(Tensor input, T[] begin, T[] end, T[] strides = n
         public Tensor[] split(Tensor value, int num_split, Tensor axis, string name = null)
             => array_ops.split(
                 value: value,
-                num_split: num_split,
+                num_or_size_splits: num_split,
                 axis: axis,
                 name: name);
 
         public Tensor[] split(Tensor value, int num_split, int axis, string name = null)
             => array_ops.split(
                 value: value,
-                num_split: num_split,
-                axis: axis,
+                num_or_size_splits: num_split,
+                axis: ops.convert_to_tensor(axis),
                 name: name);
 
         public Tensor ensure_shape(Tensor x, Shape shape, string name = null)
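
Both `split` overloads now forward the count through the renamed `num_or_size_splits` parameter, and the int-axis overload converts the axis to a tensor before delegating. A short usage sketch:

```csharp
using Tensorflow;
using static Tensorflow.Binding;

// Split a [6, 4] tensor into 3 equal parts along axis 0.
var value = tf.ones(new Shape(6, 4));
var parts = tf.split(value, num_split: 3, axis: 0);
// parts.Length == 3; each part should have shape [2, 4].
```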
diff --git a/src/TensorFlowNET.Core/Binding.Util.cs b/src/TensorFlowNET.Core/Binding.Util.cs
index 8df39334a..c5705930e 100644
--- a/src/TensorFlowNET.Core/Binding.Util.cs
+++ b/src/TensorFlowNET.Core/Binding.Util.cs
@@ -503,7 +503,7 @@ public static TF_DataType GetDataType(this object data)
                 case Tensors tensors:
                     return tensors.dtype;
                 case IEnumerable<Tensor> tensors:
-                    return tensors.First().dtype;
+                    return tensors.Where(x => x is not null).First().dtype;
                 case RefVariable variable:
                     return variable.dtype;
                 case ResourceVariable variable:
diff --git a/src/TensorFlowNET.Core/Extensions/DictionaryExtension.cs b/src/TensorFlowNET.Core/Common/Extensions/DictionaryExtension.cs
similarity index 100%
rename from src/TensorFlowNET.Core/Extensions/DictionaryExtension.cs
rename to src/TensorFlowNET.Core/Common/Extensions/DictionaryExtension.cs
diff --git a/src/TensorFlowNET.Core/Extensions/JObjectExtensions.cs b/src/TensorFlowNET.Core/Common/Extensions/JObjectExtensions.cs
similarity index 80%
rename from src/TensorFlowNET.Core/Extensions/JObjectExtensions.cs
rename to src/TensorFlowNET.Core/Common/Extensions/JObjectExtensions.cs
index 2e758dbf1..6ceba445a 100644
--- a/src/TensorFlowNET.Core/Extensions/JObjectExtensions.cs
+++ b/src/TensorFlowNET.Core/Common/Extensions/JObjectExtensions.cs
@@ -3,16 +3,16 @@
 using System.Collections.Generic;
 using System.Text;
 
-namespace Tensorflow.Extensions
+namespace Tensorflow.Common.Extensions
 {
     public static class JObjectExtensions
     {
         public static T? TryGetOrReturnNull<T>(this JObject obj, string key)
         {
             var res = obj[key];
-            if(res is null)
+            if (res is null)
             {
-                return default(T);
+                return default;
             }
             else
             {
diff --git a/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs b/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs
new file mode 100644
index 000000000..287b48cc3
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Extensions/LinqExtensions.cs
@@ -0,0 +1,38 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+
+namespace Tensorflow.Common.Extensions
+{
+    public static class LinqExtensions
+    {
+#if NETSTANDARD2_0
+        public static IEnumerable<T> TakeLast<T>(this IEnumerable<T> sequence, int count)
+        {
+            return sequence.Skip(sequence.Count() - count);
+        }
+
+        public static IEnumerable<T> SkipLast<T>(this IEnumerable<T> sequence, int count)
+        {
+            return sequence.Take(sequence.Count() - count);
+        }
+#endif
+        public static Tensors ToTensors(this Tensor[] tensors)
+        {
+            return new Tensors(tensors);
+        }
+
+        public static Tensors ToTensors(this IList<Tensor> tensors)
+        {
+            return new Tensors(tensors);
+        }
+
+        public static void Deconstruct<T1, T2, T3>(this (T1, T2, T3) values, out T1 first, out T2 second, out T3 third)
+        {
+            first = values.Item1;
+            second = values.Item2;
+            third = values.Item3;
+        }
+    }
+}
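
These helpers mostly smooth over the `Tensor[]` / `Tensors` boundary. A usage sketch for the `ToTensors` extensions (the tensor values are illustrative):

```csharp
using System.Collections.Generic;
using Tensorflow;
using Tensorflow.Common.Extensions;
using static Tensorflow.Binding;

Tensor[] array = { tf.constant(1), tf.constant(2) };
Tensors fromArray = array.ToTensors();              // wrap a Tensor[] as Tensors

IList<Tensor> list = new List<Tensor> { tf.constant(3) };
Tensors fromList = list.ToTensors();                // wrap an IList<Tensor> as Tensors
```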
diff --git a/src/TensorFlowNET.Core/Common/Extensions/NestExtensions.cs b/src/TensorFlowNET.Core/Common/Extensions/NestExtensions.cs
new file mode 100644
index 000000000..76bdd6133
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Extensions/NestExtensions.cs
@@ -0,0 +1,33 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Common.Types;
+
+namespace Tensorflow.Common.Extensions
+{
+    public static class NestExtensions
+    {
+        public static Tensors ToTensors(this INestable<Tensor> tensors)
+        {
+            return new Tensors(tensors.AsNest());
+        }
+
+        public static Tensors? ToTensors(this Nest<Tensor> tensors)
+        {
+            return Tensors.FromNest(tensors);
+        }
+
+        /// <summary>
+        /// If the leaves of the nested object are themselves nested structures, this function can reduce it by one level.
+        /// For example, `Nest[Nest[T]]` can be reduced to `Nest[T]`.
+        /// </summary>
+        /// <typeparam name="TIn"></typeparam>
+        /// <typeparam name="TOut"></typeparam>
+        /// <param name="input"></param>
+        /// <returns></returns>
+        public static Nest<TOut> ReduceTo<TIn, TOut>(this INestStructure<TIn> input) where TIn: INestStructure<TOut>
+        {
+            return Nest<TOut>.ReduceFrom(input);
+        }
+    }
+}
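
A sketch of how `ReduceTo` collapses one level of nesting; the integer leaves are illustrative:

```csharp
using Tensorflow.Common.Types;
using Tensorflow.Common.Extensions;

// A Nest whose leaves are themselves Nest<int> instances, i.e. Nest[Nest[int]].
var inner1 = new Nest<int>(1);
var inner2 = new Nest<int>(new Nest<int>[] { new Nest<int>(2), new Nest<int>(3) });
var outer = new Nest<Nest<int>>(new Nest<Nest<int>>[]
{
    new Nest<Nest<int>>(inner1),
    new Nest<Nest<int>>(inner2)
});

// Nest[Nest[int]] -> Nest[int]; reduced.Flatten() yields 1, 2, 3.
Nest<int> reduced = outer.ReduceTo<Nest<int>, int>();
```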
diff --git a/src/TensorFlowNET.Core/Extensions/OneofExtension.cs b/src/TensorFlowNET.Core/Common/Extensions/OneofExtension.cs
similarity index 100%
rename from src/TensorFlowNET.Core/Extensions/OneofExtension.cs
rename to src/TensorFlowNET.Core/Common/Extensions/OneofExtension.cs
diff --git a/src/TensorFlowNET.Core/Common/Types/FakeTensorByTensorArray.cs b/src/TensorFlowNET.Core/Common/Types/FakeTensorByTensorArray.cs
new file mode 100644
index 000000000..d0c35ee70
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/FakeTensorByTensorArray.cs
@@ -0,0 +1,20 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    /// <summary>
+    /// This is a temporary solution, which should be removed after `Tensors` is refactored.
+    /// </summary>
+    [Obsolete]
+    public class FakeTensorByTensorArray: Tensor
+    {
+        public TensorArray TensorArray { get; set; }
+
+        public FakeTensorByTensorArray(TensorArray array)
+        {
+            TensorArray = array;
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs
new file mode 100644
index 000000000..986136f4d
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/GeneralizedTensorShape.cs
@@ -0,0 +1,69 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    public class GeneralizedTensorShape: Nest<Shape>
+    {
+        public GeneralizedTensorShape(Shape value, string? name = null)
+        {
+            NodeValue = value;
+            NestType = NestType.Node;
+        }
+
+        public GeneralizedTensorShape(IEnumerable<Shape> values, string? name = null)
+        {
+            ListValue = values.Select(s => new Nest<Shape>(s) as INestStructure<Shape>).ToList();
+            Name = name;
+            NestType = NestType.List;
+        }
+
+        public GeneralizedTensorShape(Dictionary<string, Shape> value, string? name = null)
+        {
+            DictValue = value.ToDictionary(x => x.Key, x => new Nest<Shape>(x.Value) as INestStructure<Shape>);
+            Name = name;
+            NestType = NestType.Dictionary;
+        }
+
+        public GeneralizedTensorShape(Nest<Shape> other)
+        {
+            NestType = other.NestType;
+            NodeValue = other.NodeValue;
+            DictValue = other.DictValue;
+            ListValue = other.ListValue;
+            Name = other.Name;
+        }
+
+        public Shape ToSingleShape()
+        {
+            var shapes = Flatten().ToList();
+            if (shapes.Count != 1)
+            {
+                throw new ValueError("The generalized shape contains more than 1 dim.");
+            }
+            return shapes[0];
+        }
+
+        public long ToNumber()
+        {
+            var shapes = Flatten().ToList();
+            if (shapes.Count != 1 || shapes[0].ndim != 1)
+            {
+                throw new ValueError("The generalized shape contains more than 1 dim.");
+            }
+            return shapes[0].dims[0];
+        }
+
+        public INestStructure<TensorShapeConfig> ToTensorShapeConfigs()
+        {
+            return MapStructure(s => new TensorShapeConfig() { Items = s.dims.Select<long, long?>(x => x == -1 ? null : x).ToArray() });
+        }
+
+        public static implicit operator GeneralizedTensorShape(Shape shape)
+        {
+            return new GeneralizedTensorShape(shape);
+        }
+    }
+}
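
A brief sketch of how `GeneralizedTensorShape` is intended to be used; it wraps either a single `Shape` or a collection of shapes in the `Nest<Shape>` machinery:

```csharp
using Tensorflow;
using Tensorflow.Common.Types;

// A single shape: the implicit conversion wraps it as a node.
GeneralizedTensorShape single = new Shape(32, 64);
var shape = single.ToSingleShape();     // (32, 64)

// Several shapes, e.g. the state sizes of a stacked RNN cell.
var multi = new GeneralizedTensorShape(new[] { new Shape(32), new Shape(64) });
// multi.Flatten() yields both shapes; ToSingleShape() would throw here.
```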
diff --git a/src/TensorFlowNET.Core/Common/Types/INestStructure.cs b/src/TensorFlowNET.Core/Common/Types/INestStructure.cs
new file mode 100644
index 000000000..32b662937
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/INestStructure.cs
@@ -0,0 +1,40 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    /// <summary>
+    /// This interface indicates that a class may have a nested structure and provides
+    /// methods to manipulate that structure.
+    /// </summary>
+    public interface INestStructure<T>: INestable<T>
+    {
+        NestType NestType { get; }
+
+        /// <summary>
+        /// The item count of depth 1 of the nested structure.
+        /// For example, [1, 2, [3, 4, 5]] has ShallowNestedCount = 3.
+        /// </summary>
+        int ShallowNestedCount { get; }
+        /// <summary>
+        /// The total count of leaf items in the nested structure, regardless of depth.
+        /// For example, [1, 2, [3, 4, 5]] has TotalNestedCount = 5.
+        /// </summary>
+        int TotalNestedCount { get; }
+
+        /// <summary>
+        /// Flatten the nestable object. Note that if the object contains only one value, 
+        /// it will be flattened to an enumerable with a single element.
+        /// </summary>
+        /// <returns></returns>
+        IEnumerable<T> Flatten();
+        /// <summary>
+        /// Construct a new nested structure with the same layout by applying <paramref name="func"/> to each value.
+        /// </summary>
+        /// <typeparam name="TOut"></typeparam>
+        /// <param name="func"></param>
+        /// <returns></returns>
+        INestStructure<TOut> MapStructure<TOut>(Func<T, TOut> func);
+    }
+}
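
To make the two counts concrete, a small sketch using the `Nest<T>` implementation added in this PR:

```csharp
using Tensorflow.Common.Types;

// Corresponds to [1, 2, [3, 4, 5]].
var nest = new Nest<int>(new INestStructure<int>[]
{
    new Nest<int>(1),
    new Nest<int>(2),
    new Nest<int>(new Nest<int>[] { new Nest<int>(3), new Nest<int>(4), new Nest<int>(5) })
});

// nest.ShallowNestedCount == 3  (two leaves plus one sub-list at depth 1)
// nest.TotalNestedCount   == 5  (all leaf values)
```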
diff --git a/src/TensorFlowNET.Core/Common/Types/INestable.cs b/src/TensorFlowNET.Core/Common/Types/INestable.cs
new file mode 100644
index 000000000..7ce49f85a
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/INestable.cs
@@ -0,0 +1,11 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    public interface INestable<T>
+    {
+        Nest<T> AsNest();
+    }
+}
diff --git a/src/TensorFlowNET.Core/Common/Types/IOptionalArgs.cs b/src/TensorFlowNET.Core/Common/Types/IOptionalArgs.cs
new file mode 100644
index 000000000..427e71aaa
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/IOptionalArgs.cs
@@ -0,0 +1,21 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    /// <summary>
+    /// This interface is used when the corresponding Python methods accept optional arguments.
+    /// For example, `Keras.Layer.Apply` generally takes three arguments as inputs, while 
+    /// `Keras.Layer.RNN` takes more. When calling an RNN, pass an `RnnOptionalArgs` instance 
+    /// as the extra parameter of the method.
+    /// </summary>
+    public interface IOptionalArgs
+    {
+        /// <summary>
+        /// The identifier of the class. It is not an argument itself, but is used to 
+        /// distinguish between different kinds of optional args.
+        /// </summary>
+        string Identifier { get; }
+    }
+}
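
A hypothetical implementation sketch; the real `RnnOptionalArgs` lives in the Keras project, so the members shown here are illustrative only:

```csharp
using Tensorflow;
using Tensorflow.Common.Types;

// Illustrative only: mirrors how an optional-args class is expected to look.
public class RnnOptionalArgs : IOptionalArgs
{
    public string Identifier => "Rnn";
    public Tensor Mask { get; set; } = null;
    public Tensors Constants { get; set; } = null;
}
```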
diff --git a/src/TensorFlowNET.Core/Extensions/NamedTuple.cs b/src/TensorFlowNET.Core/Common/Types/NamedTuple.cs
similarity index 100%
rename from src/TensorFlowNET.Core/Extensions/NamedTuple.cs
rename to src/TensorFlowNET.Core/Common/Types/NamedTuple.cs
diff --git a/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs b/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs
new file mode 100644
index 000000000..dc7fd3a1f
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/Nest.Static.cs
@@ -0,0 +1,62 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    public static class Nest
+    {
+        /// <summary>
+        /// Pack the flat items into a nested structure following the template.
+        /// </summary>
+        /// <typeparam name="T"></typeparam>
+        /// <param name="template"></param>
+        /// <param name="flatItems"></param>
+        /// <returns></returns>
+        public static Nest<TOut> PackSequenceAs<T, TOut>(INestable<T> template, TOut[] flatItems)
+        {
+            return template.AsNest().PackSequence(flatItems);
+        }
+
+        /// <summary>
+        /// Pack the flat items into a nested structure following the template.
+        /// </summary>
+        /// <typeparam name="T"></typeparam>
+        /// <param name="template"></param>
+        /// <param name="flatItems"></param>
+        /// <returns></returns>
+        public static Nest<T> PackSequenceAs<T>(INestable<T> template, List<T> flatItems)
+        {
+            return template.AsNest().PackSequence(flatItems.ToArray());
+        }
+
+        /// <summary>
+        /// Flatten the nested object.
+        /// </summary>
+        /// <typeparam name="T"></typeparam>
+        /// <param name="nestedObject"></param>
+        /// <returns></returns>
+        public static IEnumerable<T> Flatten<T>(INestable<T> nestedObject)
+        {
+            return nestedObject.AsNest().Flatten();
+        }
+
+        /// <summary>
+        /// Map the structure with specified function.
+        /// </summary>
+        /// <typeparam name="TIn"></typeparam>
+        /// <typeparam name="TOut"></typeparam>
+        /// <param name="func"></param>
+        /// <param name="nestedObject"></param>
+        /// <returns></returns>
+        public static INestStructure<TOut> MapStructure<TIn, TOut>(Func<TIn, TOut> func, INestable<TIn> nestedObject)
+        {
+            return nestedObject.AsNest().MapStructure(func);
+        }
+
+        public static bool IsNested<T>(INestable<T> obj)
+        {
+            return obj.AsNest().IsNested();
+        }
+    }
+}
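
A short sketch of the static helpers round-tripping a structure through `Flatten`, `MapStructure` and `PackSequenceAs`:

```csharp
using System.Linq;
using Tensorflow.Common.Types;

// Template: [10, [20, 30]]
var template = new Nest<int>(new INestStructure<int>[]
{
    new Nest<int>(10),
    new Nest<int>(new Nest<int>[] { new Nest<int>(20), new Nest<int>(30) })
});

var flat = Nest.Flatten(template).ToArray();            // 10, 20, 30
var doubled = Nest.MapStructure(x => x * 2, template);  // same layout, doubled leaves
var packed = Nest.PackSequenceAs(template, new[] { "a", "b", "c" });
// packed has the same structure as template, with string leaves.
```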
diff --git a/src/TensorFlowNET.Core/Common/Types/Nest.cs b/src/TensorFlowNET.Core/Common/Types/Nest.cs
new file mode 100644
index 000000000..89ce29f2f
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/Nest.cs
@@ -0,0 +1,485 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Common.Extensions;
+
+namespace Tensorflow.Common.Types
+{
+    public enum NestType
+    {
+        Empty,
+        Node,
+        List,
+        Dictionary
+    }
+
+    /// <summary>
+    /// A nested structure which may include a single value, a list, or a dictionary. 
+    /// Note that dictionaries do not guarantee element order. When used as an IEnumerable, 
+    /// the traversal order is depth-first.
+    /// </summary>
+    /// <typeparam name="T"></typeparam>
+    public class Nest<T> : INestStructure<T>, IEnumerable<T>
+    {
+        private static readonly Nest<T> _empty = new Nest<T>()
+        {
+            NestType = NestType.Empty,
+        };
+        public static Nest<T> Empty => _empty;
+        public NestType NestType { get; protected set; }
+        public string? Name { get; set; }
+        public T? NodeValue { get; protected set; }
+        public List<INestStructure<T>>? ListValue { get; protected set; }
+        public Dictionary<string, INestStructure<T>>? DictValue { get; protected set; }
+
+        public int ShallowNestedCount
+        {
+            get
+            {
+                if (NestType == NestType.Empty)
+                {
+                    return 0;
+                }
+                else if (NestType == NestType.Node)
+                {
+                    return 1;
+                }
+                else if (NestType == NestType.List)
+                {
+                    return ListValue!.Count;
+                }
+                else // dict
+                {
+                    return DictValue!.Count;
+                }
+            }
+        }
+
+        public int TotalNestedCount
+        {
+            get
+            {
+                return Flatten().Count();
+            }
+        }
+
+        protected Nest() { }
+
+        public Nest(T value, string? name = null)
+        {
+            NodeValue = value;
+            Name = name;
+            NestType = NestType.Node;
+        }
+
+        public Nest(IEnumerable<INestStructure<T>> values, string? name = null)
+        {
+            ListValue = values.ToList();
+            Name = name;
+            NestType = NestType.List;
+        }
+
+        public Nest(Dictionary<string, INestStructure<T>> value, string? name = null)
+        {
+            DictValue = value;
+            Name = name;
+            NestType = NestType.Dictionary;
+        }
+
+        public Nest(Nest<T> other)
+        {
+            NestType = other.NestType;
+            NodeValue = other.NodeValue;
+            DictValue = other.DictValue;
+            ListValue = other.ListValue;
+            Name = other.Name;
+        }
+
+        public virtual IEnumerable<T> Flatten()
+        {
+            return FlattenInternal(this);
+        }
+        public virtual INestStructure<TOut> MapStructure<TOut>(Func<T, TOut> func)
+        {
+            return MapStructureInternal(func);
+        }
+
+        /// <summary>
+        /// Pack the flat items into a nested structure following the template.
+        /// </summary>
+        /// <param name="flatItems"></param>
+        /// <returns></returns>
+        public virtual Nest<TOut> PackSequence<TOut>(TOut[] flatItems)
+        {
+            if(flatItems.Length == 0)
+            {
+                return Nest<TOut>.Empty;
+            }
+            int index = 0;
+            return PackSequenceInternal(this, flatItems, ref index);
+        }
+
+        private static Nest<TOut> PackSequenceInternal<TOut>(Nest<T> template, TOut[] flatItems, ref int index)
+        {
+            if(template.NestType == NestType.Node)
+            {
+                if(index >= flatItems.Length)
+                {
+                    throw new InvalidArgumentError("The template and flat items are not matched.");
+                }
+                return new Nest<TOut>(flatItems[index++]);
+            }
+            else if(template.NestType == NestType.List)
+            {
+                List<Nest<TOut>> nestedObjects = new List<Nest<TOut>>();
+                for (int i = 0; i < template.ListValue!.Count; i++)
+                {
+                    nestedObjects.Add(PackSequenceInternal(template.ListValue![i].AsNest(), flatItems, ref index));
+                }
+                return new Nest<TOut>(nestedObjects);
+            }
+            else if(template.NestType == NestType.Dictionary)
+            {
+                Dictionary<string, INestStructure<TOut>> dict = new Dictionary<string, INestStructure<TOut>>();
+                foreach(var (key, value) in template.DictValue!)
+                {
+                    dict[key] = PackSequenceInternal(value.AsNest(), flatItems, ref index);
+                }
+                return new Nest<TOut>(dict);
+            }
+            // Consider Empty as invalid type.
+            throw new InvalidArgumentError("When using `PackSequenceAs`, the template cannot contain empty node.");
+        }
+
+        public virtual Nest<T> AsNest()
+        {
+            return this;
+        }
+
+        public virtual Nest<T> MergeWith(Nest<T>? other)
+        {
+            if(other is null || other == Nest<T>.Empty)
+            {
+                return this;
+            }
+            if(this == Nest<T>.Empty)
+            {
+                return other;
+            }
+            if(NestType == NestType.Node && other.NestType == NestType.Node)
+            {
+                return new Nest<T>(new Nest<T>[] { this, other });
+            }
+            else if(NestType == NestType.List && other.NestType == NestType.List)
+            {
+                return new Nest<T>(this.ListValue!.Concat(other.ListValue!));
+            }
+            else if(NestType == NestType.Dictionary && other.NestType == NestType.Dictionary)
+            {
+                return new Nest<T>(this.DictValue!.Concat(other.DictValue!).ToDictionary(x => x.Key, x => x.Value));
+            }
+            else
+            {
+                return new Nest<T>(new Nest<T>[] { this, other });
+            }
+        }
+
+        /// <summary>
+        /// Determines whether the nested object is really nested. Despite being called `Nest`, an instance is sometimes 
+        /// not actually nested. For example, [1, 2, 3] is not nested, while [1, [2, 3]] is.
+        /// </summary>
+        /// <returns></returns>
+        public bool IsNested()
+        {
+            if(NestType is NestType.Empty or NestType.Node)
+            {
+                return false;
+            }
+            else if(NestType is NestType.List)
+            {
+                return ListValue!.Count > 0;
+            }
+            else
+            {
+                return DictValue!.Count > 0;
+            }
+        }
+
+        [Obsolete("The indexer of Tensors is not encouraged because it leads to unclear meanings.")]
+        public T this[int index]
+        {
+            get
+            {
+                bool success = FindInternal(this, index, out var result);
+                if (success)
+                {
+                    return result;
+                }
+                else
+                {
+                    throw new IndexOutOfRangeException();
+                }
+            }
+            set
+            {
+                bool success = SetInternal(this, index, value);
+                if (!success)
+                {
+                    throw new IndexOutOfRangeException();
+                }
+            }
+        }
+
+        /// <summary>
+        /// If the existing nested structure is of type `Nest[INestStructure[T]]`, we can reduce it 
+        /// to `Nest[T]`.
+        /// </summary>
+        /// <typeparam name="TOut"></typeparam>
+        /// <param name="input"></param>
+        /// <returns></returns>
+        public static Nest<T> ReduceFrom<TOut>(INestStructure<TOut> input) where TOut: INestStructure<T>
+        {
+            var nested = input.AsNest();
+            return ReduceInternal(nested).AsNest();
+        }
+
+        private static INestStructure<T> ReduceInternal<TOut>(Nest<TOut> node) where TOut : INestStructure<T>
+        {
+            if(node.NestType == NestType.Empty)
+            {
+                return Nest<T>.Empty;
+            }
+            else if(node.NestType == NestType.Node)
+            {
+                return node.NodeValue!.AsNest();
+            }
+            else if(node.NestType == NestType.List)
+            {
+                return new Nest<T>(node.ListValue!.Select(x => ReduceInternal(x.AsNest())));
+            }
+            else // Dictionary type
+            {
+                return new Nest<T>(node.DictValue!.ToDictionary(x => x.Key, x => ReduceInternal(x.Value.AsNest())));
+            }
+        }
+
+        private static bool FindInternal(Nest<T> node, int index, out T? result)
+        {
+            if (node.NestType == NestType.Node)
+            {
+                if(index == 0)
+                {
+                    result = node.NodeValue!;
+                    return true;
+                }
+                result = default(T);
+                return false;
+            }
+            else if (node.NestType == NestType.List)
+            {
+                foreach (var item in node.ListValue!)
+                {
+                    if(index == 0)
+                    {
+                        return FindInternal(item.AsNest(), index, out result);
+                    }
+                    index--;
+                }
+                result = default(T);
+                return false;
+            }
+            else if(node.NestType == NestType.Dictionary)
+            {
+                foreach (var item in node.DictValue!.Values)
+                {
+                    if (index == 0)
+                    {
+                        return FindInternal(item.AsNest(), index, out result);
+                    }
+                    index--;
+                }
+                result = default(T);
+                return false;
+            }
+            else
+            {
+                result = default(T);
+                return false;
+            }
+        }
+
+        private static bool SetInternal(Nest<T> node, int index, T newValue)
+        {
+            if (node.NestType == NestType.Node)
+            {
+                if (index == 0)
+                {
+                    node.NodeValue = newValue;
+                    return true;
+                }
+                return false;
+            }
+            else if (node.NestType == NestType.List)
+            {
+                foreach (var item in node.ListValue!)
+                {
+                    if (index == 0)
+                    {
+                        return SetInternal(item.AsNest(), index, newValue);
+                    }
+                    index--;
+                }
+                return false;
+            }
+            else if (node.NestType == NestType.Dictionary)
+            {
+                foreach (var item in node.DictValue!.Values)
+                {
+                    if (index == 0)
+                    {
+                        return SetInternal(item.AsNest(), index, newValue);
+                    }
+                    index--;
+                }
+                return false;
+            }
+            else
+            {
+                return false;
+            }
+        }
+
+        private static IEnumerable<T> FlattenInternal(Nest<T> node)
+        {
+            if (node.NestType == NestType.Node)
+            {
+                yield return node.NodeValue!;
+            }
+            else if (node.NestType == NestType.List)
+            {
+                foreach (var item in node.ListValue!)
+                {
+                    foreach(var val in FlattenInternal(item.AsNest()))
+                    {
+                        yield return val;
+                    }
+                }
+            }
+            else if (node.NestType == NestType.Dictionary)
+            {
+                foreach (var item in node.DictValue!.Values)
+                {
+                    foreach (var val in FlattenInternal(item.AsNest()))
+                    {
+                        yield return val;
+                    }
+                }
+            }
+        }
+
+        private Nest<TOut> MapStructureInternal<TOut>(Func<T, TOut> func)
+        {
+            if (NestType == NestType.Node)
+            {
+                return new Nest<TOut>(func(NodeValue!));
+            }
+            else if (NestType == NestType.List)
+            {
+                List<Nest<TOut>> outs = new List<Nest<TOut>>();
+                foreach (var item in ListValue!)
+                {
+                    outs.Add(item.AsNest().MapStructureInternal(func));
+                }
+                return new Nest<TOut>(outs);
+            }
+            else if (NestType == NestType.Dictionary)
+            {
+                Dictionary<string, INestStructure<TOut>> outs = new Dictionary<string, INestStructure<TOut>>();
+                foreach (var (key, value) in DictValue!)
+                {
+                    outs.Add(key, value.AsNest().MapStructureInternal(func));
+                }
+                return new Nest<TOut>(outs);
+            }
+            else
+            {
+                return Nest<TOut>.Empty;
+            }
+        }
+
+        public IEnumerator<T> GetEnumerator()
+        {
+            return Flatten().GetEnumerator();
+        }
+
+        IEnumerator IEnumerable.GetEnumerator()
+        {
+            return GetEnumerator();
+        }
+
+        public override string ToString()
+        {
+            StringBuilder sb = new StringBuilder();
+            sb.Append("(");
+            WriteString(this, sb);
+            sb.Append(")");
+            return sb.ToString();
+        }
+
+        private static void WriteString(Nest<T> node,  StringBuilder sb)
+        {
+            if (!string.IsNullOrEmpty(node.Name))
+            {
+                sb.Append($"{node.Name}: ");
+            }
+            if (node.NestType == NestType.Node)
+            {
+                sb.Append(node.NodeValue!.ToString());
+            }
+            else if (node.NestType == NestType.List)
+            {
+                sb.Append("[");
+                for(int i = 0; i < node.ListValue!.Count; i++)
+                {
+                    WriteString(node.ListValue![i].AsNest(), sb);
+                    if(i != node.ListValue!.Count - 1)
+                    {
+                        sb.Append(", ");
+                    }
+                }
+                sb.Append("]");
+            }
+            else if (node.NestType == NestType.Dictionary)
+            {
+                sb.Append("{");
+                int count = node.DictValue!.Count;
+                int i = 0;
+                foreach (var (key, value) in node.DictValue!)
+                {
+                    sb.Append($"{key}: ");
+                    WriteString(value.AsNest(), sb);
+                    if (i != count - 1)
+                    {
+                        sb.Append(", ");
+                    }
+                    i++;
+                }
+                sb.Append("}");
+            }
+            else
+            {
+                sb.Append("<empty>");
+            }
+        }
+
+        public static implicit operator Nest<T>((INestStructure<T>, INestStructure<T>) inputs)
+        {
+            return new Nest<T>(new INestStructure<T>[] { inputs.Item1, inputs.Item2 });
+        }
+
+        public static implicit operator Nest<T>((INestStructure<T>, INestStructure<T>, INestStructure<T>) inputs)
+        {
+            return new Nest<T>(new INestStructure<T>[] { inputs.Item1, inputs.Item2, inputs.Item3 });
+        }
+    }
+}
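
A small sketch showing the dictionary form and the depth-first enumeration described in the class summary:

```csharp
using System;
using System.Collections.Generic;
using System.Linq;
using Tensorflow.Common.Types;

var nest = new Nest<int>(new Dictionary<string, INestStructure<int>>
{
    ["a"] = new Nest<int>(1),
    ["b"] = new Nest<int>(new Nest<int>[] { new Nest<int>(2), new Nest<int>(3) })
});

var flat = nest.Flatten().ToList();  // depth-first: 1, 2, 3 (dictionary order is not guaranteed)
bool nested = nest.IsNested();       // true: contains a non-empty sub-structure
Console.WriteLine(nest);             // e.g. ({a: 1, b: [2, 3]})
```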
diff --git a/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs b/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs
new file mode 100644
index 000000000..cf1994554
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/NestDictionary.cs
@@ -0,0 +1,103 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    public class NestDictionary<TKey, TValue> : INestStructure<TValue>, IDictionary<TKey, TValue> where TKey : notnull
+    {
+        public NestType NestType => NestType.Dictionary;
+        public IDictionary<TKey, TValue> Value { get; set; }
+        public int ShallowNestedCount => Values.Count;
+
+        public int TotalNestedCount => Values.Count;
+        public NestDictionary(IDictionary<TKey, TValue> dict)
+        {
+            Value = dict;
+        }
+        public IEnumerable<TValue> Flatten()
+        {
+            return Value.Select(x => x.Value);
+        }
+        public INestStructure<TOut> MapStructure<TOut>(Func<TValue, TOut> func)
+        {
+            return new NestList<TOut>(Value.Select(x => func(x.Value)));
+        }
+
+        public Nest<TValue> AsNest()
+        {
+            return new Nest<TValue>(Value.Values.Select(x => new Nest<TValue>(x)));
+        }
+
+        // Required IDictionary<TKey, TValue> members
+        public int Count => Value.Count;
+
+        public bool IsReadOnly => Value.IsReadOnly;
+
+        public ICollection<TKey> Keys => Value.Keys;
+
+        public ICollection<TValue> Values => Value.Values;
+
+        public void Add(TKey key, TValue value)
+        {
+            Value.Add(key, value);
+        }
+
+        public void Add(KeyValuePair<TKey, TValue> item)
+        {
+            Value.Add(item);
+        }
+
+        public void Clear()
+        {
+            Value.Clear();
+        }
+
+        public bool Contains(KeyValuePair<TKey, TValue> item)
+        {
+            return Value.Contains(item);
+        }
+
+        public bool ContainsKey(TKey key)
+        {
+            return Value.ContainsKey(key);
+        }
+
+        public void CopyTo(KeyValuePair<TKey, TValue>[] array, int arrayIndex)
+        {
+            Value.CopyTo(array, arrayIndex);
+        }
+
+        public IEnumerator<KeyValuePair<TKey, TValue>> GetEnumerator()
+        {
+            return Value.GetEnumerator();
+        }
+
+        IEnumerator IEnumerable.GetEnumerator()
+        {
+            return GetEnumerator();
+        }
+
+        public bool Remove(TKey key)
+        {
+            return Value.Remove(key);
+        }
+
+        public bool Remove(KeyValuePair<TKey, TValue> item)
+        {
+            return Value.Remove(item);
+        }
+
+        public bool TryGetValue(TKey key, out TValue value)
+        {
+            return Value.TryGetValue(key, out value);
+        }
+
+        // Optional IDictionary<TKey, TValue> members
+        public TValue this[TKey key]
+        {
+            get => Value[key];
+            set => Value[key] = value;
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Common/Types/NestList.cs b/src/TensorFlowNET.Core/Common/Types/NestList.cs
new file mode 100644
index 000000000..1e0d272b7
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/NestList.cs
@@ -0,0 +1,53 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    /// <summary>
+    /// The implementation of a list that supports nested structure, in which the depth is 1.
+    /// </summary>
+    /// <typeparam name="T"></typeparam>
+    public sealed class NestList<T> : INestStructure<T>, IEnumerable<T>
+    {
+        public NestType NestType => NestType.List;
+        public List<T> Values { get; set; }
+        public int ShallowNestedCount => Values.Count;
+
+        public int TotalNestedCount => Values.Count;
+
+        public NestList(params T[] values)
+        {
+            Values = new List<T>(values);
+        }
+
+        public NestList(IEnumerable<T> values)
+        {
+            Values = new List<T>(values);
+        }
+        public IEnumerable<T> Flatten()
+        {
+            return Values;
+        }
+        public INestStructure<TOut> MapStructure<TOut>(Func<T, TOut> func)
+        {
+            return new NestList<TOut>(Values.Select(x => func(x)));
+        }
+
+        public Nest<T> AsNest()
+        {
+            return new Nest<T>(Values.Select(x => new Nest<T>(x)));
+        }
+
+        // Enumerator implementation
+        public IEnumerator<T> GetEnumerator()
+        {
+            return Values.GetEnumerator();
+        }
+
+        IEnumerator IEnumerable.GetEnumerator()
+        {
+            return GetEnumerator();
+        }
+    }
+}
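
A minimal sketch of `NestList<T>`, the depth-1 list form:

```csharp
using Tensorflow.Common.Types;

var list = new NestList<int>(1, 2, 3);
// For a depth-1 list, ShallowNestedCount == TotalNestedCount == 3.
var doubled = list.MapStructure(x => x * 2);  // NestList<int> with 2, 4, 6
var asNest = list.AsNest();                   // Nest<int> wrapping three nodes
```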
diff --git a/src/TensorFlowNET.Core/Common/Types/NestNode.cs b/src/TensorFlowNET.Core/Common/Types/NestNode.cs
new file mode 100644
index 000000000..701aade9a
--- /dev/null
+++ b/src/TensorFlowNET.Core/Common/Types/NestNode.cs
@@ -0,0 +1,36 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Common.Types
+{
+    /// <summary>
+    /// A nested structure with only one element.
+    /// </summary>
+    /// <typeparam name="T"></typeparam>
+    public class NestNode<T> : INestStructure<T>
+    {
+        public NestType NestType => NestType.Node;
+        public T Value { get; set; }
+        public int ShallowNestedCount => 1;
+
+        public int TotalNestedCount => 1;
+        public NestNode(T value)
+        {
+            Value = value;
+        }
+        public IEnumerable<T> Flatten()
+        {
+            yield return Value;
+        }
+        public INestStructure<TOut> MapStructure<TOut>(Func<T, TOut> func)
+        {
+            return new NestNode<TOut>(func(Value));
+        }
+
+        public Nest<T> AsNest()
+        {
+            return new Nest<T>(Value);
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Keras/Saving/TensorShapeConfig.cs b/src/TensorFlowNET.Core/Common/Types/TensorShapeConfig.cs
similarity index 95%
rename from src/TensorFlowNET.Core/Keras/Saving/TensorShapeConfig.cs
rename to src/TensorFlowNET.Core/Common/Types/TensorShapeConfig.cs
index 7abcfde26..a36930eca 100644
--- a/src/TensorFlowNET.Core/Keras/Saving/TensorShapeConfig.cs
+++ b/src/TensorFlowNET.Core/Common/Types/TensorShapeConfig.cs
@@ -3,7 +3,7 @@
 using System.Collections.Generic;
 using System.Linq;
 
-namespace Tensorflow.Keras.Saving
+namespace Tensorflow.Common.Types
 {
     public class TensorShapeConfig
     {
diff --git a/src/TensorFlowNET.Core/Data/DatasetV2.cs b/src/TensorFlowNET.Core/Data/DatasetV2.cs
index 324d7e834..c1762d670 100644
--- a/src/TensorFlowNET.Core/Data/DatasetV2.cs
+++ b/src/TensorFlowNET.Core/Data/DatasetV2.cs
@@ -161,8 +161,8 @@ public override string ToString()
                     break;
                 }
 
-                yield return (new Tensors(results.Take(FirstInputTensorCount)), results.Length == FirstInputTensorCount ? 
-                    null : new Tensors(results.Skip(FirstInputTensorCount)));
+                yield return (new Tensors(results.Take(FirstInputTensorCount).ToArray()), results.Length == FirstInputTensorCount ? 
+                    null : new Tensors(results.Skip(FirstInputTensorCount).ToArray()));
             }
         }
 
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
index f1a09ed7b..0ce55841b 100644
--- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_FastPathExecute.cs
@@ -352,13 +352,19 @@ bool SetOpAttrScalar(Context ctx, SafeEagerOpHandle op,
                     c_api.TFE_OpSetAttrFloat(op, key, Convert.ToSingle(value));
                     break;
                 case TF_AttrType.TF_ATTR_SHAPE:
-                    var dims = (value as long[]).ToArray();
+                    long[] dims;
+                    if (value is Shape shape) dims = shape.dims.ToArray();
+                    else if (value is long[] longs) dims = longs.ToArray();
+                    else if (value is int[] ints) dims = ints.Select(x => (long)x).ToArray();
+                    else dims = ((long[])value).ToArray();
                     c_api.TFE_OpSetAttrShape(op, key, dims, dims.Length, status);
                     status.Check(true);
                     break;
                 case TF_AttrType.TF_ATTR_FUNC:
                     if (value is ConcreteFunction func)
                         c_api.TFE_OpSetAttrFunctionName(op, key, func.func_graph.FuncName, func.func_graph.FuncName.Length);
+                    else if(value is string str)
+                        c_api.TFE_OpSetAttrFunctionName(op, key, str, str.Length);
                     else
                         throw new NotImplementedException("TF_AttrType.TF_ATTR_FUNC");
                     break;
diff --git a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs
index 1f7b3ae64..3515fed83 100644
--- a/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerRunner.TFE_TapeGradient.cs
@@ -65,7 +65,7 @@ public Tensor[] TFE_TapeGradient(ITape tape,
             {
                 outgrad_vec = output_gradients.ToList();
             }
-            var result = tape.ComputeGradient(target_vec, sources_vec, source_tensors_that_are_targets, outgrad_vec, false);
+            var result = tape.ComputeGradient(target_vec, sources_vec, source_tensors_that_are_targets, outgrad_vec, true);
 
 
             bool unconnected_gradients_zero = unconnected_gradients == "zero";
@@ -137,7 +137,6 @@ TapeTensor TapeTensorFromTensor(Tensor tensor)
                 {
                     dims[i] = c_api.TFE_TensorHandleDim(handle, i, status);
                 }
-                Shape tensor_shape = new(dims);
 
                 if(status.Code != TF_Code.TF_OK)
                 {
@@ -145,6 +144,7 @@ TapeTensor TapeTensorFromTensor(Tensor tensor)
                 }
                 else
                 {
+                    Shape tensor_shape = new(dims);
                     return new TapeTensor(id, dtype, tensor_shape);
                 }
             }
@@ -173,8 +173,12 @@ bool DTypeNeedsHandleData(TF_DataType dtype)
             return dtype == dtypes.variant || dtype == dtypes.resource;
         }
 
-        bool ListContainNone(long[] list)
+        bool ListContainNone(long[]? list)
         {
+            if(list is null)
+            {
+                return true;
+            }
             int len = list.Length;
             if(len == 0)
             {
diff --git a/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs b/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs
index ce3c983b5..71b3075aa 100644
--- a/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs
+++ b/src/TensorFlowNET.Core/Eager/EagerTensor.ToString.cs
@@ -10,6 +10,11 @@ public override string ToString()
             var str = NDArrayRender.ToString(nd);
             return $"tf.Tensor: shape={shape}, dtype={dtype.as_numpy_name()}, numpy={str}";
         }
-            
+        public string ToString(int maxLength)
+        {
+            var nd = new NDArray(this);
+            var str = NDArrayRender.ToString(nd, maxLength);
+            return $"tf.Tensor: shape={shape}, dtype={dtype.as_numpy_name()}, numpy={str}";
+        }
     }
 }
diff --git a/src/TensorFlowNET.Core/Exceptions/NotOkStatusException.cs b/src/TensorFlowNET.Core/Exceptions/NotOkStatusException.cs
new file mode 100644
index 000000000..c283c1a45
--- /dev/null
+++ b/src/TensorFlowNET.Core/Exceptions/NotOkStatusException.cs
@@ -0,0 +1,19 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Exceptions
+{
+    public class NotOkStatusException : TensorflowException
+    {
+        public NotOkStatusException() : base()
+        {
+
+        }
+
+        public NotOkStatusException(string message) : base(message)
+        {
+
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs b/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs
index 083d4813a..ac099ae2b 100644
--- a/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs
+++ b/src/TensorFlowNET.Core/Framework/Models/TensorSpec.cs
@@ -1,4 +1,5 @@
 using System.Linq;
+using Tensorflow.Eager;
 
 namespace Tensorflow.Framework.Models
 {
@@ -24,5 +25,17 @@ public TensorSpec _batch(int dim = -1)
             shapes.Insert(0, dim);
             return new TensorSpec(shapes.ToArray(), _dtype);
         }
+
+        public static TensorSpec FromTensor(Tensor tensor, string? name = null)
+        {
+            if(tensor is EagerTensor)
+            {
+                return new TensorSpec(tensor.shape, tensor.dtype, name);
+            }
+            else
+            {
+                return new TensorSpec(tensor.shape, tensor.dtype, name ?? tensor.name);
+            }
+        }
     }
 }
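
A usage sketch for the new factory; for eager tensors the tensor's name is ignored because an eager tensor has no meaningful graph name:

```csharp
using Tensorflow.Framework.Models;
using static Tensorflow.Binding;

var t = tf.constant(new float[,] { { 1f, 2f }, { 3f, 4f } });
var spec = TensorSpec.FromTensor(t, name: "input");
// spec should report shape (2, 2) and dtype TF_FLOAT.
```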
diff --git a/src/TensorFlowNET.Core/Framework/auto_control_deps_utils.cs b/src/TensorFlowNET.Core/Framework/auto_control_deps_utils.cs
new file mode 100644
index 000000000..28d9e5008
--- /dev/null
+++ b/src/TensorFlowNET.Core/Framework/auto_control_deps_utils.cs
@@ -0,0 +1,89 @@
+using Tensorflow.Graphs;
+
+namespace Tensorflow.Framework
+{
+    internal static class auto_control_deps_utils
+    {
+        public static readonly string READ_ONLY_RESOURCE_INPUTS_ATTR = "_read_only_resource_inputs";
+        public static List<int> get_read_only_resource_input_indices_graph(FuncGraph func_graph)
+        {
+            List<int> result = new List<int>();
+            // A cache to store the read only resource inputs of an Op.
+            // Operation -> ObjectIdentitySet of resource handles.
+            Dictionary<Operation, HashSet<Tensor>> opReadOnlyResourceInputs =
+                new Dictionary<Operation, HashSet<Tensor>>();
+
+            for (int inputIndex = 0; inputIndex < func_graph.Inputs.Length; inputIndex++)
+            {
+                Tensor t = func_graph.Inputs[inputIndex];
+                if (t.dtype != dtypes.resource)
+                    continue;
+
+                bool readOnly = true;
+                foreach (var op in t.consumers())
+                {
+                    if (opReadOnlyResourceInputs.ContainsKey(op))
+                    {
+                        if (!opReadOnlyResourceInputs[op].Contains(t))
+                        {
+                            readOnly = false;
+                            break;
+                        }
+                    }
+                    else
+                    {
+                        List<int> indices = _get_read_only_resource_input_indices_op(op);
+                        opReadOnlyResourceInputs[op] = new HashSet<Tensor>(
+                            indices.Select(i => op.inputs[i]));
+                        if (!opReadOnlyResourceInputs[op].Contains(t))
+                        {
+                            readOnly = false;
+                            break;
+                        }
+                    }
+                }
+
+                if (readOnly)
+                    result.Add(inputIndex);
+            }
+
+            return result;
+        }
+
+        private static List<int> _get_read_only_resource_input_indices_op(Operation op)
+        {
+            // ignore the RESOURCE_READ_OPS
+
+            int[] read_only_input_indices;
+
+            try
+            {
+                read_only_input_indices = op.get_attr<int[]>(READ_ONLY_RESOURCE_INPUTS_ATTR);
+            }
+            catch (InvalidArgumentError)
+            {
+                return new List<int>();
+            }
+
+            int read_only_index = 0;
+            List<int> result = new();
+            for (int i = 0; i < op.inputs.Length; i++)
+            {
+                if (read_only_index >= read_only_input_indices.Length)
+                {
+                    break;
+                }
+                if (op.inputs[i].dtype != dtypes.resource)
+                {
+                    continue;
+                }
+                if (read_only_index < read_only_input_indices.Length && i == read_only_input_indices[read_only_index])
+                {
+                    result.Add(i);
+                    read_only_index++;
+                }
+            }
+            return result;
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Framework/function_def_lib.cs b/src/TensorFlowNET.Core/Framework/function_def_lib.cs
index 67f8d324e..488c6b654 100644
--- a/src/TensorFlowNET.Core/Framework/function_def_lib.cs
+++ b/src/TensorFlowNET.Core/Framework/function_def_lib.cs
@@ -42,10 +42,10 @@ public static FuncGraph function_def_to_graph(FunctionDef fdef, object? structur
             func_graph.as_default();
             importer.import_graph_def(graph_def, name: "", validate_colocation_constraints: false);
             var input_tensor_names = fdef.Signature.InputArg.Select(x => nested_to_flat_tensor_name[x.Name]);
-            func_graph.Inputs = new Tensors(input_tensor_names.Select(x => func_graph.get_tensor_by_name(x)));
+            func_graph.Inputs = new Tensors(input_tensor_names.Select(x => func_graph.get_tensor_by_name(x)).ToArray());
 
             var output_tensor_names = fdef.Signature.OutputArg.Select(x => nested_to_flat_tensor_name[fdef.Ret[x.Name]]);
-            func_graph.Outputs = new Tensors(output_tensor_names.Select(x => func_graph.get_tensor_by_name(x)));
+            func_graph.Outputs = new Tensors(output_tensor_names.Select(x => func_graph.get_tensor_by_name(x)).ToArray());
             // TODO(Rinne): func_graph.ControlOutputs
             _set_handle_data(func_graph, fdef);
 
diff --git a/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs b/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs
index 88dce7d98..8742e4535 100644
--- a/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs
+++ b/src/TensorFlowNET.Core/Functions/ConcreteFunction.cs
@@ -8,6 +8,7 @@
 using Tensorflow.Graphs;
 using Tensorflow.Train;
 using Tensorflow.Util;
+using Tensorflow.Common.Extensions;
 using static Tensorflow.Binding;
 
 namespace Tensorflow.Functions
@@ -40,6 +41,18 @@ public class ConcreteFunction: Trackable
         public Tensor[] FlatStructuredOutputs => func_graph.FlatStructuredOutputs;
         public IEnumerable<IVariableV1> Variables => func_graph.Variables;
         public IEnumerable<IVariableV1> TrainableVariables => func_graph.TrainableVariables;
+        internal NameAttrList AsNameAttrList
+        {
+            get
+            {
+                NameAttrList ret = new() { Name = this.Name };
+                foreach (var (name, value) in _attrs)
+                {
+                    ret.Attr[name] = value;
+                }
+                return ret;
+            }
+        }
 
         public ConcreteFunction(string name)
         {
diff --git a/src/TensorFlowNET.Core/Gradients/array_grad.cs b/src/TensorFlowNET.Core/Gradients/array_grad.cs
index f939f7b69..1b6bc95ee 100644
--- a/src/TensorFlowNET.Core/Gradients/array_grad.cs
+++ b/src/TensorFlowNET.Core/Gradients/array_grad.cs
@@ -90,8 +90,7 @@ private static Tensor[] _ConcatGradHelper(Operation op, Tensor grad, int start_v
                     ? input_values[0].rank + dim_int 
                     : dim_int % input_values[0].rank;
                 var sizes = input_values.Select(x => x.shape[non_neg_concat_dim]).ToArray();
-                var sizes_tensor = constant_op.constant(sizes);
-                out_grads = array_ops.split(grad, sizes_tensor, non_neg_concat_dim).ToList();
+                out_grads = array_ops.split(grad, sizes.Select(x => (int)x).ToArray(), ops.convert_to_tensor(non_neg_concat_dim)).ToList();
             }
             else if (constant_op.is_constant(concat_dim))
             {
@@ -127,7 +126,7 @@ there will be a small number of performance regressions.*/
                             new Tensor[] { non_neg_concat_dim, tf.constant(0) },
                             new Tensor[] { tf.constant(1), tf.constant(-1) });
                     var squeeze_sizes = array_ops.squeeze(slice);
-                    out_grads = array_ops.split(axis: grad, value: squeeze_sizes, num_split: (int)non_neg_concat_dim).ToList();
+                    out_grads = array_ops.split(axis: grad, value: squeeze_sizes, num_or_size_splits: (int)non_neg_concat_dim).ToList();
                 }
                 else
                 {
diff --git a/src/TensorFlowNET.Core/Graphs/FuncGraph.cs b/src/TensorFlowNET.Core/Graphs/FuncGraph.cs
index 3bce52ea5..ba7d7068e 100644
--- a/src/TensorFlowNET.Core/Graphs/FuncGraph.cs
+++ b/src/TensorFlowNET.Core/Graphs/FuncGraph.cs
@@ -81,7 +81,7 @@ internal set
     public IEnumerable<IVariableV1> TrainableVariables => Variables.Where(v => v.Trainable);
     public Dictionary<string, AttrValue> Attrs { get; set; }
 
-    Dictionary<long, (Tensor, Tensor)> _captures
+    internal Dictionary<long, (Tensor, Tensor)> _captures
         = new Dictionary<long, (Tensor, Tensor)>();
 
     public Tensor[] external_captures
@@ -399,7 +399,7 @@ public static FuncGraph func_graph_from_func(string name, Func<object[], object[
         var flat_func_args = nest.flatten(func_args as object);
         var flat_func_kwargs = nest.flatten(func_kwargs as object);
         func_graph.Inputs = new Tensors(flat_func_args.concat(flat_func_kwargs)
-            .Where(x => x is Tensor).Select(x => (Tensor)x));
+            .Where(x => x is Tensor).Select(x => (Tensor)x).ToArray());
 
         //var func_args_before = nest.pack_sequence_as(func_args, flat_func_args, true);
         //var func_kwargs_before = nest.pack_sequence_as(func_kwargs, flat_func_kwargs, true);
diff --git a/src/TensorFlowNET.Core/Graphs/Graph.cs b/src/TensorFlowNET.Core/Graphs/Graph.cs
index eb8df5812..9e879a0f0 100644
--- a/src/TensorFlowNET.Core/Graphs/Graph.cs
+++ b/src/TensorFlowNET.Core/Graphs/Graph.cs
@@ -129,7 +129,7 @@ public int seed
             }
         }
 
-        protected Graph outer_graph;
+        internal Graph outer_graph;
         public Graph OuterGraph => outer_graph;
         public Dictionary<string, EagerDefinedFunction> Functions => _functions;
         public SafeGraphHandle c_graph => _handle;
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs
index 764641474..db76fda06 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMArgs.cs
@@ -4,8 +4,6 @@ public class LSTMArgs : RNNArgs
     {
         // TODO: maybe change the `RNNArgs` and implement this class.
         public bool UnitForgetBias { get; set; }
-        public float Dropout { get; set; }
-        public float RecurrentDropout { get; set; }
         public int Implementation { get; set; }
     }
 }
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs
index 594c99bb0..786236e4d 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/LSTMCellArgs.cs
@@ -1,7 +1,35 @@
-namespace Tensorflow.Keras.ArgsDefinition.Rnn
+using Newtonsoft.Json;
+using static Tensorflow.Binding; 
+
+namespace Tensorflow.Keras.ArgsDefinition.Rnn
 {
     // TODO: complete the implementation
-    public class LSTMCellArgs : LayerArgs
+    public class LSTMCellArgs : AutoSerializeLayerArgs
     {
+        [JsonProperty("units")]
+        public int Units { get; set; }
+        // TODO(Rinne): Activation lacks a default value. Merging keras
+        // into tf.net could resolve it.
+        [JsonProperty("activation")]
+        public Activation Activation { get; set; }
+        [JsonProperty("recurrent_activation")]
+        public Activation RecurrentActivation { get; set; }
+        [JsonProperty("use_bias")]
+        public bool UseBias { get; set; } = true;
+        [JsonProperty("dropout")]
+        public float Dropout { get; set; } = .0f;
+        [JsonProperty("recurrent_dropout")]
+        public float RecurrentDropout { get; set; } = .0f;
+        [JsonProperty("kernel_initializer")]
+        public IInitializer KernelInitializer { get; set; }
+        [JsonProperty("recurrent_initializer")]
+        public IInitializer RecurrentInitializer { get; set; }
+        [JsonProperty("bias_initializer")]
+        public IInitializer BiasInitializer { get; set; }
+        [JsonProperty("unit_forget_bias")]
+        public bool UnitForgetBias { get; set; } = true;
+        [JsonProperty("implementation")]
+        public int Implementation { get; set; } = 2;
+
     }
 }
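A minimal construction sketch for the expanded LSTMCellArgs (not part of the patch; the values are illustrative):

    var args = new LSTMCellArgs
    {
        Units = 32,
        UseBias = true,
        Dropout = 0.1f,
        RecurrentDropout = 0f,
        UnitForgetBias = true,
        Implementation = 2
    };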
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs
index 2585592c1..2d7fb001a 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RNNArgs.cs
@@ -1,17 +1,12 @@
 using Newtonsoft.Json;
 using System.Collections.Generic;
+using Tensorflow.Keras.Layers.Rnn;
 
 namespace Tensorflow.Keras.ArgsDefinition.Rnn
 {
+    // TODO(Rinne): add regularizers.
     public class RNNArgs : AutoSerializeLayerArgs
     {
-        public interface IRnnArgCell : ILayer
-        {
-            object state_size { get; }
-        }
-        [JsonProperty("cell")]
-        // TODO: the cell should be serialized with `serialize_keras_object`.
-        public IRnnArgCell Cell { get; set; } = null;
         [JsonProperty("return_sequences")]
         public bool ReturnSequences { get; set; } = false;
         [JsonProperty("return_state")]
@@ -24,8 +19,10 @@ public interface IRnnArgCell : ILayer
         public bool Unroll { get; set; } = false;
         [JsonProperty("time_major")]
         public bool TimeMajor { get; set; } = false;
+
+        public int? InputDim { get; set; }
+        public int? InputLength { get; set; }
         // TODO: Add `num_constants` and `zero_output_for_mask`.
-        public Dictionary<string, object> Kwargs { get; set; } = null;
 
         public int Units { get; set; }
         public Activation Activation { get; set; }
@@ -34,21 +31,8 @@ public interface IRnnArgCell : ILayer
         public IInitializer KernelInitializer { get; set; }
         public IInitializer RecurrentInitializer { get; set; }
         public IInitializer BiasInitializer { get; set; }
-
-        // kernel_regularizer=None,
-        // recurrent_regularizer=None,
-        // bias_regularizer=None,
-        // activity_regularizer=None,
-        // kernel_constraint=None,
-        // recurrent_constraint=None,
-        // bias_constraint=None,
-        // dropout=0.,
-        // recurrent_dropout=0.,
-        // return_sequences=False,
-        // return_state=False,
-        // go_backwards=False,
-        // stateful=False,
-        // unroll=False,
-        // **kwargs):
+        public float Dropout { get; set; } = .0f;
+        public bool ZeroOutputForMask { get; set; } = false;
+        public float RecurrentDropout { get; set; } = .0f;
     }
 }
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs
new file mode 100644
index 000000000..64b500bba
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/RnnOptionalArgs.cs
@@ -0,0 +1,14 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Common.Types;
+
+namespace Tensorflow.Keras.ArgsDefinition.Rnn
+{
+    public class RnnOptionalArgs: IOptionalArgs
+    {
+        public string Identifier => "Rnn";
+        public Tensor Mask { get; set; } = null;
+        public Tensors Constants { get; set; } = null;
+    }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs
new file mode 100644
index 000000000..d21d61905
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/SimpleRNNCellArgs.cs
@@ -0,0 +1,27 @@
+using Newtonsoft.Json;
+
+namespace Tensorflow.Keras.ArgsDefinition.Rnn
+{
+    public class SimpleRNNCellArgs: AutoSerializeLayerArgs
+    {
+        [JsonProperty("units")]
+        public int Units { get; set; }
+        // TODO(Rinne): Activation lacks a default value. Merging keras
+        // into tf.net could resolve it.
+        [JsonProperty("activation")]
+        public Activation Activation { get; set; }
+        [JsonProperty("use_bias")]
+        public bool UseBias { get; set; } = true;
+        [JsonProperty("dropout")]
+        public float Dropout { get; set; } = .0f;
+        [JsonProperty("recurrent_dropout")]
+        public float RecurrentDropout { get; set; } = .0f;
+        [JsonProperty("kernel_initializer")]
+        public IInitializer KernelInitializer { get; set; }
+        [JsonProperty("recurrent_initializer")]
+        public IInitializer RecurrentInitializer { get; set; }
+        [JsonProperty("bias_initializer")]
+        public IInitializer BiasInitializer { get; set; }
+
+    }
+}
diff --git a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs
index fdfadab85..50a6127df 100644
--- a/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs
+++ b/src/TensorFlowNET.Core/Keras/ArgsDefinition/Rnn/StackedRNNCellsArgs.cs
@@ -1,10 +1,10 @@
 using System.Collections.Generic;
+using Tensorflow.Keras.Layers.Rnn;
 
 namespace Tensorflow.Keras.ArgsDefinition.Rnn
 {
     public class StackedRNNCellsArgs : LayerArgs
     {
-        public IList<RnnCell> Cells { get; set; }
-        public Dictionary<string, object> Kwargs { get; set; } = null;
+        public bool ReverseStateOrder = false;
     }
 }
diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs
index f76693945..e94c8bf10 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/ILayer.cs
@@ -1,4 +1,5 @@
-using Tensorflow.Keras.Engine;
+using Tensorflow.Common.Types;
+using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
 using Tensorflow.NumPy;
 using Tensorflow.Training;
@@ -14,7 +15,7 @@ public interface ILayer: IWithTrackable, IKerasConfigable
         List<ILayer> Layers { get; }
         List<INode> InboundNodes { get; }
         List<INode> OutboundNodes { get; }
-        Tensors Apply(Tensors inputs, Tensor state = null, bool training = false);
+        Tensors Apply(Tensors inputs, Tensors states = null, bool training = false, IOptionalArgs? optional_args = null);
         List<IVariableV1> TrainableVariables { get; }
         List<IVariableV1> TrainableWeights { get; }
         List<IVariableV1> NonTrainableWeights { get; }
diff --git a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
index 6a29f9e5e..a19508d42 100644
--- a/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
+++ b/src/TensorFlowNET.Core/Keras/Layers/ILayersApi.cs
@@ -1,5 +1,6 @@
 using System;
 using Tensorflow.Framework.Models;
+using Tensorflow.Keras.Layers.Rnn;
 using Tensorflow.NumPy;
 using static Google.Protobuf.Reflection.FieldDescriptorProto.Types;
 
@@ -159,6 +160,18 @@ public ILayer LayerNormalization(Axis? axis,
         public ILayer Normalization(Shape? input_shape = null, int? axis = -1, float? mean = null, float? variance = null, bool invert = false);
         public ILayer LeakyReLU(float alpha = 0.3f);
 
+        public IRnnCell LSTMCell(int units,
+            string activation = "tanh",
+            string recurrent_activation = "sigmoid",
+            bool use_bias = true,
+            string kernel_initializer = "glorot_uniform",
+            string recurrent_initializer = "orthogonal",
+            string bias_initializer = "zeros",
+            bool unit_forget_bias = true,
+            float dropout = 0f,
+            float recurrent_dropout = 0f,
+            int implementation = 2);
+
         public ILayer LSTM(int units,
             Activation activation = null,
             Activation recurrent_activation = null,
@@ -192,6 +205,19 @@ public ILayer Rescaling(float scale,
             float offset = 0,
             Shape input_shape = null);
 
+        public IRnnCell SimpleRNNCell(
+            int units,
+            string activation = "tanh",
+            bool use_bias = true,
+            string kernel_initializer = "glorot_uniform",
+            string recurrent_initializer = "orthogonal",
+            string bias_initializer = "zeros",
+            float dropout = 0f,
+            float recurrent_dropout = 0f);
+
+        public IRnnCell StackedRNNCells(
+            IEnumerable<IRnnCell> cells);
+
         public ILayer SimpleRNN(int units,
             string activation = "tanh",
             string kernel_initializer = "glorot_uniform",
@@ -200,6 +226,26 @@ public ILayer SimpleRNN(int units,
             bool return_sequences = false,
             bool return_state = false);
 
+        public ILayer RNN(
+            IRnnCell cell,
+            bool return_sequences = false,
+            bool return_state = false,
+            bool go_backwards = false,
+            bool stateful = false,
+            bool unroll = false,
+            bool time_major = false
+            );
+
+        public ILayer RNN(
+            IEnumerable<IRnnCell> cell,
+            bool return_sequences = false,
+            bool return_state = false,
+            bool go_backwards = false,
+            bool stateful = false,
+            bool unroll = false,
+            bool time_major = false
+            );
+
         public ILayer Subtract();
     }
 }
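A minimal consumption sketch of the new cell-based RNN surface (not part of the patch; it assumes `tf.keras.layers` implements this ILayersApi, which is not shown in this section of the diff, and the shapes are illustrative):

    var cell = tf.keras.layers.SimpleRNNCell(4);
    var rnn = tf.keras.layers.RNN(cell, return_sequences: true);
    var x = tf.random.normal(new Shape(8, 10, 16));   // (batch, time, features)
    var y = rnn.Apply(x);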
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs
new file mode 100644
index 000000000..8d6fbc976
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IRnnCell.cs
@@ -0,0 +1,25 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Common.Types;
+
+namespace Tensorflow.Keras.Layers.Rnn
+{
+    public interface IRnnCell: ILayer
+    {
+        /// <summary>
+        /// If the derived class does not implement it, this property should return null.
+        /// </summary>
+        INestStructure<long>? StateSize { get; }
+        /// <summary>
+        /// If the derived class does not implement it, this property should return null.
+        /// </summary>
+        INestStructure<long>? OutputSize { get; }
+        /// <summary>
+        /// Whether the optional RNN args are supported when applying the layer.
+        /// In other words, whether `Apply` is overridden to process `RnnOptionalArgs`.
+        /// </summary>
+        bool SupportOptionalArgs { get; }
+        Tensors GetInitialState(Tensors inputs, Tensor batch_size, TF_DataType dtype);
+    }
+}
diff --git a/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs
new file mode 100644
index 000000000..e73244a51
--- /dev/null
+++ b/src/TensorFlowNET.Core/Keras/Layers/Rnn/IStackedRnnCells.cs
@@ -0,0 +1,12 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Tensorflow.Keras.Layers.Rnn
+{
+    public interface IStackedRnnCells : IRnnCell
+    {
+        int Count { get; }
+        IRnnCell this[int idx] { get; }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedKerasShapesWrapperJsonConverter.cs b/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedKerasShapesWrapperJsonConverter.cs
index 1a4245bf2..3a21db9d2 100644
--- a/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedKerasShapesWrapperJsonConverter.cs
+++ b/src/TensorFlowNET.Core/Keras/Saving/Json/CustomizedKerasShapesWrapperJsonConverter.cs
@@ -3,6 +3,7 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Saving.Json
 {
diff --git a/src/TensorFlowNET.Core/Keras/Saving/KerasShapesWrapper.cs b/src/TensorFlowNET.Core/Keras/Saving/KerasShapesWrapper.cs
index d91d3161d..ea6fe976f 100644
--- a/src/TensorFlowNET.Core/Keras/Saving/KerasShapesWrapper.cs
+++ b/src/TensorFlowNET.Core/Keras/Saving/KerasShapesWrapper.cs
@@ -6,6 +6,7 @@
 using System.Diagnostics;
 using OneOf.Types;
 using Tensorflow.Keras.Saving.Json;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Saving
 {
diff --git a/src/TensorFlowNET.Core/NumPy/Axis.cs b/src/TensorFlowNET.Core/NumPy/Axis.cs
index 976c764f2..7a3ecbf10 100644
--- a/src/TensorFlowNET.Core/NumPy/Axis.cs
+++ b/src/TensorFlowNET.Core/NumPy/Axis.cs
@@ -74,8 +74,3 @@ public override string ToString()
             => IsScalar ? $"{axis[0]}" : $"({string.Join(", ", axis)})";
     }
 }
-
-namespace System.Runtime.CompilerServices
-{
-    internal static class IsExternalInit { }
-}
diff --git a/src/TensorFlowNET.Core/NumPy/NDArrayRender.cs b/src/TensorFlowNET.Core/NumPy/NDArrayRender.cs
index 02cb5926c..230797b8b 100644
--- a/src/TensorFlowNET.Core/NumPy/NDArrayRender.cs
+++ b/src/TensorFlowNET.Core/NumPy/NDArrayRender.cs
@@ -7,7 +7,7 @@ namespace Tensorflow.NumPy
 {
     public class NDArrayRender
     {
-        public static string ToString(NDArray array)
+        public static string ToString(NDArray array, int maxLength = 10)
         {
             Shape shape = array.shape;
             if (shape.IsScalar)
@@ -15,12 +15,12 @@ public static string ToString(NDArray array)
 
             var s = new StringBuilder();
             s.Append("array(");
-            Build(s, array);
+            Build(s, array, maxLength);
             s.Append(")");
             return s.ToString();
         }
 
-        static void Build(StringBuilder s, NDArray array)
+        static void Build(StringBuilder s, NDArray array, int maxLength)
         {
             var shape = array.shape;
 
@@ -35,11 +35,11 @@ static void Build(StringBuilder s, NDArray array)
             var len = shape[0];
             s.Append("[");
 
-            if (len <= 10)
+            if (len <= maxLength)
             {
                 for (int i = 0; i < len; i++)
                 {
-                    Build(s, array[i]);
+                    Build(s, array[i], maxLength);
                     if (i < len - 1)
                     {
                         s.Append(", ");
@@ -49,9 +49,9 @@ static void Build(StringBuilder s, NDArray array)
             }
             else
             {
-                for (int i = 0; i < 5; i++)
+                for (int i = 0; i < maxLength / 2; i++)
                 {
-                    Build(s, array[i]);
+                    Build(s, array[i], maxLength);
                     if (i < len - 1)
                     {
                         s.Append(", ");
@@ -62,9 +62,9 @@ static void Build(StringBuilder s, NDArray array)
                 s.Append(" ... ");
                 s.AppendLine();
 
-                for (int i = (int)len - 5; i < len; i++)
+                for (int i = (int)len - maxLength / 2; i < len; i++)
                 {
-                    Build(s, array[i]);
+                    Build(s, array[i], maxLength);
                     if (i < len - 1)
                     {
                         s.Append(", ");
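A short sketch of the new truncation control in NDArrayRender (not part of the patch; the printed form is only indicative):

    var nd = np.arange(100);
    // With maxLength: 6, only the first and last 3 elements are rendered and the middle is elided with " ... ".
    Console.WriteLine(NDArrayRender.ToString(nd, maxLength: 6));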
diff --git a/src/TensorFlowNET.Core/Numpy/Shape.cs b/src/TensorFlowNET.Core/Numpy/Shape.cs
index c339f12de..cbbf66b44 100644
--- a/src/TensorFlowNET.Core/Numpy/Shape.cs
+++ b/src/TensorFlowNET.Core/Numpy/Shape.cs
@@ -19,13 +19,14 @@ limitations under the License.
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.Saving.Common;
 using Tensorflow.NumPy;
 
 namespace Tensorflow
 {
     [JsonConverter(typeof(CustomizedShapeJsonConverter))]
-    public class Shape
+    public class Shape : INestStructure<long>
     {
         public int ndim => _dims == null ? -1 : _dims.Length;
         long[] _dims;
@@ -41,6 +42,27 @@ public long[] strides
             }
         }
 
+        public NestType NestType => NestType.List;
+
+        public int ShallowNestedCount => ndim;
+        /// <summary>
+        /// The total number of leaf items in the nested structure.
+        /// For example, [1, 2, [3, 4, 5]] has TotalNestedCount = 5.
+        /// </summary>
+        public int TotalNestedCount => ndim;
+
+        public IEnumerable<long> Flatten() => dims.Select(x => x);
+
+        public INestStructure<TOut> MapStructure<TOut>(Func<long, TOut> func)
+        {
+            return new NestList<TOut>(dims.Select(x => func(x)));
+        }
+
+        public Nest<long> AsNest()
+        {
+            return new NestList<long>(Flatten()).AsNest();
+        }
+
         #region https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/proposals/csharp-8.0/ranges
         public int Length => ndim;
         public long[] Slice(int start, int length)
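A minimal sketch of Shape participating in the nest utilities through INestStructure&lt;long&gt; (not part of the patch; assumes `using System.Linq;` for ToArray):

    var shape = new Shape(2, 3, 4);
    long[] dims = shape.Flatten().ToArray();        // { 2, 3, 4 }
    var doubled = shape.MapStructure(d => d * 2);   // nested structure holding { 4, 6, 8 }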
diff --git a/src/TensorFlowNET.Core/Operations/Initializers/NpyLoadInitializer.cs b/src/TensorFlowNET.Core/Operations/Initializers/NpyLoadInitializer.cs
new file mode 100644
index 000000000..202af652a
--- /dev/null
+++ b/src/TensorFlowNET.Core/Operations/Initializers/NpyLoadInitializer.cs
@@ -0,0 +1,22 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.NumPy;
+
+namespace Tensorflow.Operations.Initializers
+{
+    /// <summary>
+    /// An initializer specially used for debugging (to load weights from disk).
+    /// </summary>
+    class NpyLoadInitializer : IInitializer
+    {
+        string _path;
+        public NpyLoadInitializer(string path) { _path = path; }
+        public string ClassName => "";
+        public IDictionary<string, object> Config => new Dictionary<string, object>();
+        public Tensor Apply(InitializerArgs args)
+        {
+            return np.load(_path);
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs b/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs
index 492047c9f..ae8733740 100644
--- a/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs
+++ b/src/TensorFlowNET.Core/Operations/Initializers/Orthogonal.cs
@@ -53,13 +53,12 @@ private Tensor _generate_init_val(Shape shape, TF_DataType dtype)
         // Compute the qr factorization
         var (q, r) = tf.linalg.qr(a, full_matrices: false);
         // Make Q uniform
-        var d = tf.linalg.tensor_diag_part(r);
+        var d = tf.linalg.tensor_diag_part(r.Single);
         q *= tf.sign(d);
 
         if (num_rows < num_cols)
         {
-            // q = tf.linalg.matrix_transpose(q);
-            throw new NotImplementedException("");
+            q = array_ops.matrix_transpose(q);
         }
 
         return _gain * tf.reshape(q, shape);
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
index d3592514d..16cbd0010 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicLSTMCell.cs
@@ -11,6 +11,7 @@ namespace Tensorflow
     /// Basic LSTM recurrent network cell.
     /// The implementation is based on: http://arxiv.org/abs/1409.2329.
     /// </summary>
+    [Obsolete("This is an incompleted tf v1 api, pleas use keras RNNs instead.")]
     public class BasicLstmCell : LayerRnnCell
     {
         int _num_units;
@@ -88,7 +89,7 @@ protected Tensors Call(Tensors inputs, Tensor state = null, bool is_training = f
             gate_inputs = nn_ops.bias_add(gate_inputs, _bias);
 
             // i = input_gate, j = new_input, f = forget_gate, o = output_gate
-            var tensors = array_ops.split(value: gate_inputs, num_split: 4, axis: one);
+            var tensors = array_ops.split(value: gate_inputs, num_or_size_splits: 4, axis: one);
             var (i, j, f, o) = (tensors[0], tensors[1], tensors[2], tensors[3]);
 
             var forget_bias_tensor = constant_op.constant(_forget_bias, dtype: f.dtype);
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
index 17d51363f..3308aebb7 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/BasicRNNCell.cs
@@ -20,6 +20,7 @@ limitations under the License.
 
 namespace Tensorflow
 {
+    [Obsolete("This is an incomplete tf v1 API, please use keras RNNs instead.")]
     public class BasicRnnCell : LayerRnnCell
     {
         int _num_units;
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs
index 7394cb7f9..65de4fe90 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/LayerRNNCell.cs
@@ -19,6 +19,7 @@ limitations under the License.
 
 namespace Tensorflow
 {
+    [Obsolete("This is an incomplete tf v1 API, please use keras RNNs instead.")]
     public class LayerRnnCell : RnnCell
     {
         protected InputSpec inputSpec;
diff --git a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs
index ecc9ca116..e488c47e7 100644
--- a/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs
+++ b/src/TensorFlowNET.Core/Operations/NnOps/RNNCell.cs
@@ -16,10 +16,12 @@ limitations under the License.
 
 using System;
 using System.Collections.Generic;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.ArgsDefinition.Rnn;
 using Tensorflow.Keras.Engine;
+using Tensorflow.Keras.Layers.Rnn;
 using Tensorflow.Keras.Saving;
 using Tensorflow.NumPy;
 using Tensorflow.Operations;
@@ -50,7 +52,8 @@ namespace Tensorflow
     /// matching structure of Tensors having shape `[batch_size].concatenate(s)`
     /// for each `s` in `self.batch_size`.
     /// </summary>
-    public abstract class RnnCell : ILayer, RNNArgs.IRnnArgCell
+    [Obsolete("This is an incomplete tf v1 API, please use keras RNNs instead.")]
+    public abstract class RnnCell : ILayer, IRnnCell
     {
         /// <summary>
         /// Attribute that indicates whether the cell is a TF RNN cell, due the slight
@@ -142,7 +145,7 @@ private Tensor _zero_state_tensors(object state_size, Tensor batch_size, TF_Data
             throw new NotImplementedException("_zero_state_tensors");
         }
 
-        public Tensors Apply(Tensors inputs, Tensor state = null, bool is_training = false)
+        public Tensors Apply(Tensors inputs, Tensors state = null, bool is_training = false, IOptionalArgs? optional_args = null)
         {
             throw new NotImplementedException();
         }
@@ -173,5 +176,18 @@ public void adapt(Tensor data, int? batch_size = null, int? steps = null)
         {
             throw new NotImplementedException();
         }
+
+        public (Tensor, Tensors) Call(Tensors inputs, Tensors states, bool? training = null)
+        {
+            throw new NotImplementedException();
+        }
+        public Tensors GetInitialState(Tensors inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid)
+        {
+            throw new NotImplementedException();
+        }
+        public INestStructure<long> StateSize => throw new NotImplementedException();
+        public INestStructure<long> OutputSize => throw new NotImplementedException();
+        public bool IsTFRnnCell => throw new NotImplementedException();
+        public bool SupportOptionalArgs => throw new NotImplementedException();
     }
 }
diff --git a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
index 76a222ba3..29e1f074f 100644
--- a/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
+++ b/src/TensorFlowNET.Core/Operations/OpDefLibrary.cs
@@ -15,9 +15,11 @@ limitations under the License.
 ******************************************************************************/
 
 using Google.Protobuf;
+using Google.Protobuf.Collections;
 using System;
 using System.Collections.Generic;
 using System.Linq;
+using Tensorflow.Functions;
 using static Tensorflow.Binding;
 using static Tensorflow.OpDef.Types;
 
@@ -387,9 +389,13 @@ private AttrValue SetAttrValue(OpDef op_def, AttrDef attr_def, object value)
                 case "list(type)":
                     attr_value.List.Type.AddRange((value as IList<TF_DataType>).Select(x => _MakeType(x, attr_def)));
                     break;
+                case "list(float)":
+                    if (value != null)
+                        attr_value.List.F.AddRange((value as IEnumerable<float>).ToArray());
+                    break;
                 case "list(int)":
                     if (value != null)
-                        attr_value.List.I.AddRange((value as int[]).Select(x => Convert.ToInt64(x)));
+                        attr_value.List.I.AddRange((value as IEnumerable<int>).Select(x => Convert.ToInt64(x)));
                     break;
                 case "bool":
                     attr_value.B = (bool)value;
@@ -420,6 +426,15 @@ private AttrValue SetAttrValue(OpDef op_def, AttrDef attr_def, object value)
                 case "list(shape)":
                     attr_value.List.Shape.AddRange((value as Shape[]).Select(x => _MakeShape(x, attr_def)));
                     break;
+                case "func":
+                    attr_value.Func = _MakeFunc(value, attr_def.Name);
+                    break;
+                case "list(func)":
+                    attr_value.List.Func.AddRange(_MakeFuncList(value, attr_def.Name));
+                    break;
+                case "list(string)":
+                    attr_value.List.S.AddRange((value as IEnumerable<string>).Select(x => ByteString.CopyFromUtf8(x)));
+                    break;
                 default:
                     throw new TypeError($"SetAttrValue: can't not convert attr_def.Type '{attr_def.Type}' to protos.");
             }
@@ -427,6 +442,47 @@ private AttrValue SetAttrValue(OpDef op_def, AttrDef attr_def, object value)
             return attr_value;
         }
 
+        private NameAttrList _MakeFunc(object func, string arg_name)
+        {
+            if(func is NameAttrList attrList)
+            {
+                return attrList;
+            }
+            NameAttrList fn_attr;
+            if(func is string funcStr)
+            {
+                fn_attr = new NameAttrList() { Name = funcStr };
+            }
+            else if(func is ConcreteFunction concrete)
+            {
+                concrete.AddTograph(ops.get_default_graph());
+                fn_attr = concrete.AsNameAttrList;
+            }
+            else if(func is EagerDefinedFunction eager)
+            {
+                eager.AddToGraph(ops.get_default_graph());
+                fn_attr = new NameAttrList() { Name = eager.Name };
+            }
+            else
+            {
+                throw new TypeError($"Don't know how to convert {func} to a func for argument {arg_name}");
+            }
+            return fn_attr;
+        }
+
+        private List<NameAttrList> _MakeFuncList(object funcList, string arg_name)
+        {
+            List<NameAttrList> res = new List<NameAttrList>();
+            if(funcList is IEnumerable enumerable)
+            {
+                foreach(var func in enumerable)
+                {
+                    res.Add(_MakeFunc(func, arg_name));
+                }
+            }
+            return res;
+        }
+
         private bool _IsListParameter(ArgDef arg)
         {
             if (!String.IsNullOrEmpty(arg.NumberAttr))
diff --git a/src/TensorFlowNET.Core/Operations/Operation.Output.cs b/src/TensorFlowNET.Core/Operations/Operation.Output.cs
index 2955a13fa..2329a4786 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.Output.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.Output.cs
@@ -34,7 +34,7 @@ public int OutputListLength(string name)
             return num;
         }
 
-        protected Tensor[] _outputs;
+        internal Tensor[] _outputs;
         public virtual Tensor[] outputs => _outputs;
         public Tensor output => _outputs.FirstOrDefault();
 
diff --git a/src/TensorFlowNET.Core/Operations/Operation.cs b/src/TensorFlowNET.Core/Operations/Operation.cs
index a789c5f4b..d31b26d4a 100644
--- a/src/TensorFlowNET.Core/Operations/Operation.cs
+++ b/src/TensorFlowNET.Core/Operations/Operation.cs
@@ -46,9 +46,9 @@ namespace Tensorflow
     /// </summary>
     public partial class Operation : ITensorOrOperation
     {
-        private readonly IntPtr _handle; // _c_op in python
+        protected IntPtr _handle; // _c_op in python
 
-        private readonly Graph _graph;
+        protected Graph _graph;
 
         internal Func<Operation, object[], Tensor[]> _gradient_function;
 
@@ -69,6 +69,7 @@ public partial class Operation : ITensorOrOperation
         //private OperationDescription _op_desc;
 
         public NodeDef node_def => GetNodeDef();
+        protected Operation() { }
 
         public Operation(IntPtr handle, Graph g = null)
         {
@@ -185,7 +186,16 @@ public void run(FeedItem[] feed_dict = null, Session session = null)
         }
 
         public virtual T get_attr<T>(string name)
-            => (T)get_attr(name);
+        {
+            if (typeof(T).IsValueType)
+            {
+                return (T)Convert.ChangeType(get_attr(name), typeof(T));
+            }
+            else
+            {
+                return (T)get_attr(name);
+            }
+        }
 
         internal unsafe TF_DataType _get_attr_type(string name)
         {
diff --git a/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs b/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs
index cf1b50af6..591760600 100644
--- a/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs
+++ b/src/TensorFlowNET.Core/Operations/_EagerTensorArray.cs
@@ -17,6 +17,8 @@ limitations under the License.
 using System;
 using System.Collections.Generic;
 using System.Linq;
+using Tensorflow.Common.Types;
+using Tensorflow.Eager;
 using Tensorflow.Framework;
 using static Tensorflow.Binding;
 
@@ -37,10 +39,6 @@ public class _EagerTensorArray : TensorArray
 
         bool _infer_shape;
         public override bool infer_shape => _infer_shape;
-        public bool _dynamic_size;
-        public Shape _element_shape;
-
-        public List<Tensor> _colocate_with;
 
         Tensor _handle;
         public override Tensor handle => _handle;
@@ -48,12 +46,14 @@ public class _EagerTensorArray : TensorArray
         public override Tensor flow => _flow;
         bool _clear_after_read;
         List<Tensor> _tensor_array;
+        List<int> _previous_read_indices;
 
         public _EagerTensorArray(TF_DataType dtype, Tensor size, bool dynamic_size = false,
             bool clear_after_read = true, string tensor_array_name = null, Tensor handle = null, Tensor flow = null,
             bool infer_shape = true, Shape? element_shape = null,
             bool colocate_with_first_write_call = true, string name = null)
         {
+            _size = size;
             _flow = constant_op.constant(0);
             _infer_shape = infer_shape;
             _element_shape = element_shape ?? Shape.Null;
@@ -61,16 +61,20 @@ public _EagerTensorArray(TF_DataType dtype, Tensor size, bool dynamic_size = fal
             _dtype = dtype.as_base_dtype();
             _dynamic_size = dynamic_size;
             _clear_after_read = clear_after_read;
-            _tensor_array = new List<Tensor>();
+            _tensor_array = Enumerable.Repeat<Tensor>(null, size.numpy()).ToList();
+            _previous_read_indices = new();
         }
 
         public override TensorArray unstack(Tensor value, string name = null)
         {
-            return tf_with(ops.name_scope(name, "TensorArrayUnstack", new { _handle, value }), delegate
+            var tensors = array_ops.unstack(value, name: name);
+            if(tensors.Length > _tensor_array.Count && !_dynamic_size)
             {
-                var num_elements = array_ops.shape(value)[0];
-                return scatter(indices: math_ops.range(0, num_elements), value: value, name: name);
-            });
+                throw new ValueError($"Cannot unstack {tensors.Length} tensors into a TensorArray of static size {_tensor_array.Count}");
+            }
+            _tensor_array = tensors.ToList();
+            // TODO(Rinne): revise the implementation. Here we should return `parent()`.
+            return this;
         }
 
         public TensorArray scatter(Tensor indices, Tensor value, string name = null)
@@ -103,7 +107,19 @@ public TensorArray scatter(Tensor indices, Tensor value, string name = null)
 
                 return ta;
             });*/
-            throw new NotImplementedException("");
+            //if (indices is EagerTensor)
+            //{
+            //    indices = indices as EagerTensor;
+            //    indices = indices.numpy();
+            //}
+
+            //foreach (var (index, val) in zip(indices.ToArray<int>(), array_ops.unstack(value)))
+            //{
+            //    this.write(index, val);
+            //}
+            //return base;
+            //throw new NotImplementedException("");
+            return this;
         }
 
         public void _merge_element_shape(Shape shape)
@@ -116,9 +132,19 @@ public void _maybe_colocate_with(Tensor value)
             _colocate_with.Add(value);
         }
 
+        private Tensor _maybe_zero(int ix)
+        {
+            var val = _tensor_array[ix];
+            if(val is null)
+            {
+                val = _tensor_array[ix] = array_ops.zeros(_element_shape, _dtype);
+            }
+            return val;
+        }
+
         public override Tensor read<T>(T index, string name = null)
         {
-            int index_int = -1;
+            int index_int;
             if (index is int int_index)
                 index_int = int_index;
             else if (index is Tensor tensor_index)
@@ -126,27 +152,75 @@ public override Tensor read<T>(T index, string name = null)
             else
                 throw new ValueError("");
 
+            if(index_int >= _tensor_array.Count)
+            {
+                throw new OutOfRangeError($"Tried to read from index {index_int} but array size is: {_tensor_array.Count} ");
+            }
+
+            var res = _tensor_array[index_int];
+            if(res is null)
+            {
+                if (_previous_read_indices.Contains(index_int))
+                {
+                    throw new InvalidArgumentError($"Could not read index {index_int} twice because it was cleared after " +
+                        $"a previous read (perhaps try setting clear_after_read = false?)");
+                }
+                else
+                {
+                    res = _maybe_zero(index_int);
+                }
+            }
+
             if (_clear_after_read)
             {
                 _tensor_array[index_int] = null;
+                _previous_read_indices.Add(index_int);
             }
-
-            return _tensor_array[index_int];
+            return res;
         }
 
         public override TensorArray write(Tensor index, Tensor value, string name = null)
         {
-            if (_infer_shape)
-                _element_shape = _element_shape.merge_with(value.shape);
-            _tensor_array.add(value);
-            return this;
+            int index_int;
+            if(index is EagerTensor eager)
+            {
+                return write<Tensor>(eager.numpy(), value, name);
+            }
+            throw new InvalidArgumentError("The index is supposed to be an EagerTensor");
         }
 
         public override TensorArray write<T>(int index, T value, string name = null)
         {
-            var value_tensor = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value");
-            var index_tensor = ops.convert_to_tensor(index, name: "index");
-            return write(index_tensor, value_tensor, name: name);
+            int size = _tensor_array.Count;
+            if(index >= size)
+            {
+                if (!_dynamic_size)
+                {
+                    throw new OutOfRangeError($"Tried to write to index {index} but array is not resizeable and size " +
+                        $"is: {size} ");
+                }
+                _tensor_array.AddRange(Enumerable.Repeat<Tensor>(null, index - size + 1));
+            }
+
+            Tensor tensor = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value");
+            
+            if(_dtype != tensor.dtype)
+            {
+                throw new InvalidArgumentError($"TensorArray dtype is {_dtype.as_python_name()} but Op is " +
+                    $"trying to write dtype {tensor.dtype.as_python_name()} ");
+            }
+
+            if (!_element_shape.is_compatible_with(tensor.shape))
+            {
+                throw new ValueError($"Incompatible shape for value ({tensor.shape}), expected ({_element_shape})");
+            }
+
+            if (_infer_shape)
+            {
+                _element_shape = _element_shape.merge_with(tensor.shape);
+            }
+            _tensor_array[index] = tensor;
+            return this;
         }
 
         private Tensor size(string name = null)
@@ -156,11 +230,26 @@ private Tensor size(string name = null)
 
         public override Tensor stack(string name = null)
         {
-            ops.colocate_with(_handle);
-            return tf_with(ops.name_scope(name, "TensorArrayStack", new { _handle }), delegate
+            if(_tensor_array.Count > 0)
+            {
+                for(int i = 0; i < _tensor_array.Count; i++)
+                {
+                    _maybe_zero(i);
+                }
+            }
+            if(_tensor_array.Count == 0 && _element_shape.IsFullyDefined)
+            {
+                return ops.convert_to_tensor(new Shape(new long[] { 0 }.Concat(_element_shape.dims).ToArray()), name: name, dtype: _dtype);
+            }
+            else
             {
-                return gather(math_ops.range(0, size()), name: name);
-            });
+                return ops.convert_to_tensor(_tensor_array, name: name, dtype: _dtype);
+            }
+            //ops.colocate_with(_handle);
+            //return tf_with(ops.name_scope(name, "TensorArrayStack", new { _handle }), delegate
+            //{
+            //    return gather(math_ops.range(0, size()), name: name);
+            //});
         }
 
         public override Tensor gather(Tensor indices, string name = null)
diff --git a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs
index 16870e9f6..2384e8146 100644
--- a/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs
+++ b/src/TensorFlowNET.Core/Operations/_GraphTensorArray.cs
@@ -16,7 +16,10 @@ limitations under the License.
 
 using System;
 using System.Collections.Generic;
+using System.Diagnostics;
 using System.Linq;
+using Tensorflow.Common.Types;
+using Tensorflow.Eager;
 using static Tensorflow.Binding;
 
 namespace Tensorflow.Operations
@@ -32,18 +35,18 @@ public class _GraphTensorArray : TensorArray
         /// first tensor written to it.
         /// </summary>
         bool _colocate_with_first_write_call;
-        public bool colocate_with_first_write_call => _colocate_with_first_write_call;
+        public override bool colocate_with_first_write_call => _colocate_with_first_write_call;
 
         bool _infer_shape;
-        public bool infer_shape => _infer_shape;
-        public bool _dynamic_size;
+        public override bool infer_shape => _infer_shape;
         public List<Shape> _element_shape;
 
         public List<Tensor> _colocate_with;
 
         internal Tensor _handle;
-        public Tensor handle => _handle;
+        public override Tensor handle => _handle;
         internal Tensor _flow;
+        public override Tensor flow => _flow;
 
         public _GraphTensorArray(TF_DataType dtype, Tensor size, bool? dynamic_size = null,
             bool? clear_after_read = null, string tensor_array_name = null, Tensor handle = null, Tensor flow = null,
@@ -54,6 +57,7 @@ public _GraphTensorArray(TF_DataType dtype, Tensor size, bool? dynamic_size = nu
             dynamic_size = dynamic_size ?? false;
             _dynamic_size = dynamic_size.Value;
             _dtype = dtype;
+            _size = size;
 
             _colocate_with_first_write_call = colocate_with_first_write_call;
             if (colocate_with_first_write_call)
@@ -146,7 +150,9 @@ public TensorArray scatter(Tensor indices, Tensor value, string name = null)
 
                 return ta;
             });*/
-            throw new NotImplementedException("");
+
+            //throw new NotImplementedException("");
+            return this;
         }
 
         public void _merge_element_shape(Shape shape)
@@ -232,4 +238,173 @@ public override Tensor gather(Tensor indices, string name = null)
             return value;
         }
     }
+
+    public class _GraphTensorArrayV2 : TensorArray
+    {
+        internal TF_DataType _dtype;
+        public override TF_DataType dtype => _dtype;
+
+        /// <summary>
+        /// Used to keep track of what tensors the TensorArray should be
+        /// colocated with.  We choose to colocate the TensorArray with the
+        /// first tensor written to it.
+        /// </summary>
+        bool _colocate_with_first_write_call;
+        public override bool colocate_with_first_write_call => _colocate_with_first_write_call;
+
+        bool _infer_shape;
+        public override bool infer_shape => _infer_shape;
+        public Shape _element_shape;
+
+        public List<Tensor> _colocate_with;
+
+        internal Tensor _handle;
+        public override Tensor handle => _handle;
+        internal Tensor _flow;
+        public override Tensor flow => _flow;
+
+        public _GraphTensorArrayV2(TF_DataType dtype, Tensor size, bool? dynamic_size = null,
+            bool? clear_after_read = null, string tensor_array_name = null, Tensor handle = null, Tensor flow = null,
+            bool infer_shape = true, Shape? element_shape = null,
+            bool colocate_with_first_write_call = true, string name = null)
+        {
+            Debug.Assert(handle is null);
+            dynamic_size = dynamic_size ?? false;
+            _dynamic_size = dynamic_size.Value;
+            _size = size;
+
+            if(flow is not null && flow.dtype != dtypes.variant)
+            {
+                throw new TypeError($"Expected `flow` to be a variant tensor, but received `{flow.dtype}` instead");
+            }
+            if(flow is null && size is null)
+            {
+                throw new ValueError("Argument `size` must be provided if argument `flow` is not provided.");
+            }
+            if(flow is not null && size is not null)
+            {
+                throw new ValueError("Cannot provide both `flow` and `size` arguments at the same time.");
+            }
+            if(flow is not null && element_shape is not null)
+            {
+                throw new ValueError("Cannot provide both `flow` and `element_shape` arguments at the same time.");
+            }
+
+            _dtype = dtype;
+
+            _element_shape = element_shape;
+            _infer_shape = infer_shape;
+            tf_with(ops.name_scope(name, "TensorArrayV2", new object[] { size, flow }), scope =>
+            {
+                if (flow is null)
+                {
+                    _flow = list_ops.tensor_list_reserve(element_shape, size, dtype, scope.scope_name);
+                }
+                else
+                {
+                    _flow = flow;
+                }
+            });
+
+            _colocate_with_first_write_call = false;
+            _colocate_with = null;
+        }
+
+        public override TensorArray unstack(Tensor value, string name = null)
+        {
+            return tf_with(ops.name_scope(name, "TensorArrayUnstack", new { _flow, value }), delegate
+            {
+                value = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value");
+                Debug.Assert(value.dtype == _dtype);
+                var flow_out = list_ops.tensor_list_from_tensor(value, value.shape.dims.Skip(1).ToArray());
+                return tensor_array_ops.build_ta_with_new_flow(this, flow_out);
+            });
+        }
+
+        public TensorArray scatter(Tensor indices, Tensor value, string name = null)
+        {
+            return tf_with(ops.name_scope(name, "TensorArrayScatter", new { _flow, value, indices }), delegate
+            {
+                value = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value");
+                Debug.Assert(value.dtype == _dtype);
+                var flow_out = list_ops.tensor_list_scatter(value, indices, _element_shape, _flow);
+                return tensor_array_ops.build_ta_with_new_flow(this, flow_out);
+            });
+        }
+
+        public override Tensor read<T>(T index, string name = null)
+        {
+            if(index is Tensor tensor)
+            {
+                return read(tensor, name);
+            }
+            else
+            {
+                throw new TypeError("Please use non-generic method instead.");
+            }
+        }
+
+        public Tensor read(Tensor index, string name = null)
+        {
+            return tf_with(tf.name_scope(name, "TensorArrayV2Read", new object[] { _flow, index }), scope =>
+            {
+                return list_ops.tensor_list_get_item(_flow, index, _dtype, _element_shape, name);
+            });
+        }
+
+        public override TensorArray write(Tensor index, Tensor value, string name = null)
+        {
+            return tf_with(ops.name_scope(name, "TensorArrayV2Write", new { _flow, index, value }), delegate
+            {
+                value = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value");
+                Debug.Assert(value.dtype == _dtype);
+                var flow_out = list_ops.tensor_list_set_item(_flow, index, value, _dynamic_size, name);
+
+                return tensor_array_ops.build_ta_with_new_flow(this, flow_out);
+            });
+        }
+
+        public override TensorArray write<T>(int index, T value, string name = null)
+        {
+            var value_tensor = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value");
+            var index_tensor = ops.convert_to_tensor(index, name: "index");
+            return write(index_tensor, value_tensor);
+        }
+
+        private Tensor size(string name = null)
+        {
+            if(!_dynamic_size && _size is not null)
+            {
+                return ops.convert_to_tensor(_size, dtypes.int32);
+            }
+            else
+            {
+                return gen_list_ops.tensor_list_length(_flow, name);
+            }
+        }
+
+        public override Tensor stack(string name = null)
+        {
+            return tf_with(ops.name_scope(name, "TensorArrayV2Stack", _flow), delegate
+            {
+                int ta_size;
+                if(!_dynamic_size && (_size is not null))
+                {
+                    var size_tensor = tensor_util.constant_value(_size);
+                    ta_size = size_tensor is null ? -1 : (int)size_tensor;
+                }
+                else
+                {
+                    ta_size = -1;
+                }
+                var value = list_ops.tensor_list_stack(_flow, _dtype, ta_size, _element_shape);
+                return value;
+            });
+        }
+
+        public override Tensor gather(Tensor indices, string name = null)
+        {
+            return list_ops.tensor_list_gather(_flow, indices, _dtype, _element_shape, name);
+        }
+    }
 }
diff --git a/src/TensorFlowNET.Core/Operations/array_ops.cs b/src/TensorFlowNET.Core/Operations/array_ops.cs
index a0b47aace..6b4fea63a 100644
--- a/src/TensorFlowNET.Core/Operations/array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/array_ops.cs
@@ -119,6 +119,27 @@ public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT
             }
         }
 
+        public static Tensor zeros(Tensors shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null)
+        {
+            dtype = dtype.as_base_dtype();
+            Tensor shapeTensor;
+            if(shape.Length > 1)
+            {
+                shapeTensor = ops.convert_to_tensor(shape, dtypes.int32);
+                if(shapeTensor.ndim > 1)
+                {
+                    shapeTensor = array_ops.reshape(shapeTensor, new Shape(-1));
+                }
+            }
+            else
+            {
+                shapeTensor = shape[0];
+            }
+            var output = fill(shapeTensor, array_ops.constant(0, dtype), name);
+            Debug.Assert(output.dtype.as_base_dtype() == dtype);
+            return output;
+        }
+
         public static Tensor boolean_mask<T1, T2>(T1 tensor, T2 mask, string name = "boolean_mask", int axis = 0)
         {
             return tf_with(ops.name_scope(name, values: new { tensor, mask }), delegate
@@ -307,6 +328,9 @@ public static Tensor expand_dims(Tensor input, int axis = -1, string name = null
         public static Tensor fill<T>(Shape dims, T value, string name = null)
             => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name);
 
+        public static Tensor fill<T>(Tensor dims, T value, string name = null)
+            => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name);
+
         /// <summary>
         /// Returns the rank of a tensor.
         /// </summary>
@@ -947,38 +971,70 @@ public static Tensor transpose(Tensor a, Tensor perm, string name = "transpose",
             });
         }
 
-        public static Tensor[] split(Tensor value, Tensor size_splits, int axis, int num = -1,
-            string name = "split")
+        /// <summary>
+        /// Transposes the last two dimensions of tensor `a`.
+        /// For example:
+        /// <code>
+        ///   x = tf.constant([[1, 2, 3], [4, 5, 6]])
+        ///   tf.matrix_transpose(x) # [[1, 4],
+        ///                          #  [2, 5],
+        ///                          #  [3, 6]]
+        /// </code>
+        /// For a matrix with two batch dimensions:
+        /// if x.shape is [1, 2, 3, 4],
+        /// tf.linalg.matrix_transpose(x) has shape [1, 2, 4, 3].
+        /// </summary>
+        /// <param name="a"></param>
+        /// <param name="name"></param>
+        /// <param name="conjugate"></param>
+        /// <returns></returns>
+        /// <exception cref="ValueError"></exception>
+        public static Tensor matrix_transpose(Tensor a, string name = "matrix_transpose", bool conjugate = false)
         {
-            if (num == -1)
-                num = (int)size_splits.shape[0];
-
-            return gen_array_ops.split_v(value, size_splits, tf.convert_to_tensor(axis), num, name: name);
+            return tf_with(ops.name_scope(name, "transpose", new { a }), scope =>
+            {
+                var a_shape = a.shape;
+                var ndims = a.shape.ndim;
+                Axis perm;
+                if(ndims != 0)
+                {
+                    if (ndims < 2)
+                    {
+                        throw new ValueError("Argument `a` should be a (batch) matrix with rank " +
+                            $">= 2.  Received `a` = {a} with shape: {a_shape}");
+                    }
+                    perm = new Axis(Enumerable.Range(0, ndims - 2).Concat(new int[] { ndims - 1, ndims - 2 }).ToArray());
+                }
+                else
+                {
+                    var a_rank = a.rank;
+                    perm = new Axis(Enumerable.Range(0, a_rank - 2).Concat(new int[] { a_rank - 1, a_rank - 2 }).ToArray());
+                }
+                return transpose(a, perm:perm, conjugate:conjugate);
+            });
         }
 
-        public static Tensor[] split<T>(Tensor value, int num_split, T axis,
+        public static Tensor[] split(Tensor value, int num_or_size_splits, Tensor axis = null,
             string name = "split")
         {
-            var size_splits = ops.convert_to_tensor(num_split);
+            return gen_array_ops.split(split_dim: axis, value: value, num_split: num_or_size_splits, name);
+        }
 
-            if (tf.Context.executing_eagerly())
+        public static Tensor[] split(Tensor value, int[] num_or_size_splits, Tensor axis = null, int num = -1,
+            string name = "split")
+        {
+            if(num_or_size_splits.Length == 0)
             {
-                return split_eager_fallback(axis, value, num_split: num_split, name: name, ctx: tf.Context);
+                throw new ValueError("Rank-0 tensors are not supported as the num_or_size_splits argument to split.");
             }
+            var size_splits = ops.convert_to_tensor(num_or_size_splits);
 
-            var _op = tf.OpDefLib._apply_op_helper("Split", name, new { split_dim = axis, value, num_split });
-            return _op.outputs;
-        }
-
-        private static Tensor[] split_eager_fallback<Ta, Tv>(Ta axis, Tv value, int num_split, string name, Context ctx = null)
-        {
-            var (_attr_T, input) = tf.Runner.ArgsToMatchingEager(ctx, args: new object[] { value });
-            var axis_tensor = ops.convert_to_tensor(axis, dtype: TF_DataType.TF_INT32);
-            var _inputs_flat = new List<Tensor> { axis_tensor };
-            _inputs_flat.AddRange(input);
-            var _attrs = new object[] { "num_split", num_split, "T", _attr_T };
+            if(num == -1)
+            {
+                num = (int)size_splits.shape[0];
+            }
 
-            return tf.Runner.Execute(ctx, "Split", num_split, _inputs_flat.ToArray(), _attrs, name: name);
+            return gen_array_ops.split_v(value: value, size_splits: size_splits, split_dim: axis, num_split: num, name: name);
         }
 
         public static Tensor slice(Tensor input, Tensor[] begin, Tensor[] size, string name = null)
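A minimal sketch of the new matrix_transpose helper added above (not part of the patch; the values are illustrative):

    var x = tf.constant(new[,] { { 1, 2, 3 }, { 4, 5, 6 } });   // shape (2, 3)
    var xt = array_ops.matrix_transpose(x);                      // shape (3, 2)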
diff --git a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs
index 862b636fd..efd9aba35 100644
--- a/src/TensorFlowNET.Core/Operations/control_flow_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/control_flow_ops.cs
@@ -675,16 +675,17 @@ public static Tensor ZerosLikeOutsideLoop(Operation op, int index)
             }
         }
 
-        public static Tensor[] while_loop(Func<Tensor[], Tensor> cond,
-            Func<Tensor[], Tensor[]> body,
-            Tensor[] loop_vars,
+        public static Tensors while_loop(Func<Tensors, Tensor> cond,
+            Func<Tensors, Tensors> body,
+            Tensors loop_vars,
             int parallel_iterations = 10,
             string name = null)
         {
             var executing_eagerly = tf.Context.executing_eagerly();
             if (!executing_eagerly)
             {
-                throw new NotImplementedException("");
+                return while_v2.while_loop(cond, body, loop_vars, parallel_iterations: parallel_iterations,
+                    name: name);
             }
 
             return tf_with(ops.name_scope("name", "while"), delegate
diff --git a/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs b/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs
index c88911194..536d4e3c2 100644
--- a/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs
+++ b/src/TensorFlowNET.Core/Operations/control_flow_util.py.cs
@@ -16,12 +16,19 @@ limitations under the License.
 
 using System;
 using System.Linq;
+using Tensorflow.Functions;
+using Tensorflow.Graphs;
 using Tensorflow.Operations;
+using static Tensorflow.Binding;
 
 namespace Tensorflow
 {
     public class control_flow_util
     {
+        public static readonly bool ENABLE_CONTROL_FLOW_V2 = (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_CONTROL_FLOW_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_CONTROL_FLOW_V2") != "0") ||
+                              (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_COND_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_COND_V2") != "0") ||
+                              (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_WHILE_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_WHILE_V2") != "0") ||
+                              (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_ENABLE_TENSOR_ARRAY_V2")) && Environment.GetEnvironmentVariable("TF_ENABLE_TENSOR_ARRAY_V2") != "0");
         /// <summary>
         /// Return true if `op` is an Exit.
         /// </summary>
@@ -196,5 +204,73 @@ public static WhileContext GetContainingWhileContext(ControlFlowContext ctxt, Co
             }
             return null;
         }
+
+        public static bool EnableControlFlowV2(Graph graph)
+        {
+            return ENABLE_CONTROL_FLOW_V2 || (graph.building_function && (graph is not FuncGraph func || func.captures.Length == 0));
+        }
+
+        public static string create_new_tf_function(FuncGraph func_graph)
+        {
+            var func = new EagerDefinedFunction(func_graph.Name, func_graph, func_graph.Inputs, func_graph.Outputs, new Dictionary<string, AttrValue>());
+            func.AddToGraph(func_graph);
+            return func_graph.Name;
+        }
+
+        public static (Operation, Tensor[]) get_op_and_outputs(Tensor[] inputs)
+        {
+            if(inputs.Length == 0)
+            {
+                return (null, new Tensor[0]);
+            }
+            else
+            {
+                return (inputs[0], inputs);
+            }
+        }
+
+        public static Tensor[] run_as_function_for_tape_gradients(Func<Tensor[], Tensor[]> make_op, Tensor[] inputs)
+        {
+            if(gradients_util.PossibleTapeGradientTypes(inputs) == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER
+                && !(ops.get_default_graph().building_function))
+            {
+                throw new NotImplementedException();
+            }
+            else
+            {
+                return make_op(inputs);
+            }
+        }
+
+        public static string unique_fn_name(string scope, string name)
+        {
+            return $"{scope}{name}_{ops.uid()}".Replace("/", "_");
+        }
+
+        public static bool output_all_intermediates()
+        {
+            if (in_defun())
+            {
+                return false;
+            }
+            if(tf.Context.FunctionCallOptions.ExecutorType == "SINGLE_THREADED_EXECUTOR")
+            {
+                return false;
+            }
+            // TODO(Rinne): check this after refactoring keras building.
+            return false;
+        }
+
+        public static bool in_defun()
+        {
+            if (tf.Context.executing_eagerly())
+            {
+                return false;
+            }
+
+            var graph = ops.get_default_graph();
+            // TODO(Rinne): CondBranchFuncGraph, WhileBodyFuncGraph, WhileCondFuncGraph
+            return graph is FuncGraph;
+        }
     }
 }
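The additions to control_flow_util mirror the Python helpers: ENABLE_CONTROL_FLOW_V2 is driven by the TF_ENABLE_*_V2 environment variables (any non-empty value other than "0" enables it), EnableControlFlowV2 also turns v2 on when building a capture-free FuncGraph, and create_new_tf_function registers a FuncGraph as an EagerDefinedFunction under its own name. A small sketch of flipping the switch; this is hypothetical usage, and because the flag is a static readonly field it is evaluated only once, on first use of the class.

    using System;

    // Set before any TensorFlow control-flow API is touched in the process.
    Environment.SetEnvironmentVariable("TF_ENABLE_CONTROL_FLOW_V2", "1");

    // From here on control_flow_util.ENABLE_CONTROL_FLOW_V2 is true, so
    // control_flow_util.EnableControlFlowV2(graph) returns true for every graph.
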
diff --git a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
index 9810d32f3..8367c2f94 100644
--- a/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_array_ops.cs
@@ -2,6 +2,7 @@
 
 using Tensorflow.Eager;
 using Tensorflow.Contexts;
+using Tensorflow.Exceptions;
 using static Tensorflow.Binding;
 
 namespace Tensorflow;
@@ -25,6 +26,10 @@ public static Tensor batch_matrix_band_part(Tensor input, Tensor num_lower, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
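The recurring change in gen_array_ops inserts a dedicated catch ahead of the existing catch-all, so a NotOkStatusException raised by the native runtime propagates to the caller instead of being swallowed and silently retried through the eager fallback. A minimal sketch of that ordering; fastPath and fallback are hypothetical stand-ins, not functions from this file.

    using System;
    using Tensorflow;
    using Tensorflow.Exceptions;

    static Tensor Run(Func<Tensor> fastPath, Func<Tensor> fallback)
    {
        try
        {
            return fastPath();
        }
        catch (NotOkStatusException)
        {
            throw;   // surface native-status failures; a bare rethrow keeps the original stack trace
        }
        catch (Exception)
        {
            // ignored: anything else falls back to the slower path below
        }
        return fallback();
    }
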
@@ -76,6 +81,10 @@ public static Tensor batch_matrix_diag(Tensor diagonal, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -125,6 +134,10 @@ public static Tensor batch_matrix_diag_part(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -175,6 +188,10 @@ public static Tensor batch_matrix_set_diag(Tensor input, Tensor diagonal, string
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -238,6 +255,10 @@ public static Tensor batch_to_space(Tensor input, Tensor crops, int block_size =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpace", name) { args = new object[] { input, crops }, attrs = new Dictionary<string, object>() { ["block_size"] = block_size } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -301,6 +322,10 @@ public static Tensor batch_to_space_nd(Tensor input, Tensor block_shape, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchToSpaceND", name) { args = new object[] { input, block_shape, crops }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -407,6 +432,10 @@ public static Tensor bitcast(Tensor input, TF_DataType type, string? name = null
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bitcast", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["type"] = type } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -464,6 +493,10 @@ public static Tensor broadcast_args(Tensor s0, Tensor s1, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -520,6 +553,10 @@ public static Tensor[] broadcast_gradient_args(Tensor s0, Tensor s1, string? nam
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastGradientArgs", name) { args = new object[] { s0, s1 }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -607,6 +644,10 @@ public static Tensor broadcast_to(Tensor input, Tensor shape, string? name = nul
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BroadcastTo", name) { args = new object[] { input, shape }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -689,6 +730,10 @@ public static Tensor check_numerics(Tensor tensor, string message, string? name
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumerics", name) { args = new object[] { tensor }, attrs = new Dictionary<string, object>() { ["message"] = message } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -752,6 +797,10 @@ public static Tensor check_numerics_v2(Tensor tensor, string message, string? na
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CheckNumericsV2", name) { args = new object[] { tensor }, attrs = new Dictionary<string, object>() { ["message"] = message } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -803,6 +852,10 @@ public static Tensor concat(Tensor concat_dim, Tensors values, string? name = nu
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Concat", name) { args = new object[] { concat_dim, values }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -871,6 +924,10 @@ public static Tensor[] concat_offset(Tensor concat_dim, Tensors shape, string? n
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatOffset", name) { args = new object[] { concat_dim, shape }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -925,6 +982,10 @@ public static Tensor concat_v2(Tensors values, Tensor axis, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConcatV2", name) { args = new object[] { values, axis }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -986,6 +1047,10 @@ public static Tensor conjugate_transpose(Tensor x, Tensor perm, string? name = n
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConjugateTranspose", name) { args = new object[] { x, perm }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1041,6 +1106,10 @@ public static Tensor _const(TensorProto value, TF_DataType dtype, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Const", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["value"] = value, ["dtype"] = dtype } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1098,6 +1167,10 @@ public static Tensor debug_gradient_identity(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DebugGradientIdentity", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1182,6 +1255,10 @@ public static Tensor deep_copy(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DeepCopy", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1330,6 +1407,10 @@ public static Tensor depth_to_space(Tensor input, int block_size = 0, string dat
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthToSpace", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["block_size"] = block_size, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1452,6 +1533,10 @@ public static Tensor dequantize(Tensor input, Tensor min_range, Tensor max_range
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dequantize", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary<string, object>() { ["mode"] = mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["dtype"] = dtype } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1532,6 +1617,10 @@ public static Tensor diag(Tensor diagonal, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Diag", name) { args = new object[] { diagonal }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1603,6 +1692,10 @@ public static Tensor diag_part(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DiagPart", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1674,6 +1767,10 @@ public static Tensor edit_distance(Tensor hypothesis_indices, Tensor hypothesis_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EditDistance", name) { args = new object[] { hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape }, attrs = new Dictionary<string, object>() { ["normalize"] = normalize } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1731,6 +1828,10 @@ public static Tensor empty(Tensor shape, TF_DataType dtype, bool init = false, s
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Empty", name) { args = new object[] { shape }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype, ["init"] = init } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1793,6 +1894,10 @@ public static Tensor ensure_shape(Tensor input, Shape shape, string? name = null
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EnsureShape", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["shape"] = shape } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1878,6 +1983,10 @@ public static Tensor expand_dims(Tensor input, Tensor dim, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExpandDims", name) { args = new object[] { input, dim }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1954,6 +2063,10 @@ public static Tensor extract_image_patches(Tensor images, int[] ksizes, int[] st
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractImagePatches", name) { args = new object[] { images }, attrs = new Dictionary<string, object>() { ["ksizes"] = ksizes, ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2030,6 +2143,10 @@ public static Tensor extract_volume_patches(Tensor input, int[] ksizes, int[] st
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ExtractVolumePatches", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["ksizes"] = ksizes, ["strides"] = strides, ["padding"] = padding } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2110,6 +2227,10 @@ public static Tensor fake_quant_with_min_max_args(Tensor inputs, float min = -6f
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgs", name) { args = new object[] { inputs }, attrs = new Dictionary<string, object>() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2168,6 +2289,10 @@ public static Tensor fake_quant_with_min_max_args_gradient(Tensor gradients, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxArgsGradient", name) { args = new object[] { gradients, inputs }, attrs = new Dictionary<string, object>() { ["min"] = min, ["max"] = max, ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2254,6 +2379,10 @@ public static Tensor fake_quant_with_min_max_vars(Tensor inputs, Tensor min, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVars", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary<string, object>() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2320,6 +2449,10 @@ public static Tensor[] fake_quant_with_min_max_vars_gradient(Tensor gradients, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary<string, object>() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2407,6 +2540,10 @@ public static Tensor fake_quant_with_min_max_vars_per_channel(Tensor inputs, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannel", name) { args = new object[] { inputs, min, max }, attrs = new Dictionary<string, object>() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2473,6 +2610,10 @@ public static Tensor[] fake_quant_with_min_max_vars_per_channel_gradient(Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeQuantWithMinMaxVarsPerChannelGradient", name) { args = new object[] { gradients, inputs, min, max }, attrs = new Dictionary<string, object>() { ["num_bits"] = num_bits, ["narrow_range"] = narrow_range } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2551,6 +2692,10 @@ public static Tensor fill(Tensor dims, Tensor value, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fill", name) { args = new object[] { dims, value }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2636,6 +2781,10 @@ public static Tensor fingerprint(Tensor data, Tensor method, string? name = null
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Fingerprint", name) { args = new object[] { data, method }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2717,6 +2866,10 @@ public static Tensor gather(Tensor params_, Tensor indices, bool validate_indice
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Gather", name) { args = new object[] { params_, indices }, attrs = new Dictionary<string, object>() { ["validate_indices"] = validate_indices } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2877,6 +3030,10 @@ public static Tensor gather_nd(Tensor params_, Tensor indices, string? name = nu
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherNd", name) { args = new object[] { params_, indices }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2961,6 +3118,10 @@ public static Tensor gather_v2(Tensor params_, Tensor indices, Tensor axis, int
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GatherV2", name) { args = new object[] { params_, indices, axis }, attrs = new Dictionary<string, object>() { ["batch_dims"] = batch_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3023,6 +3184,10 @@ public static Tensor guarantee_const(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GuaranteeConst", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3072,6 +3237,10 @@ public static Tensor identity(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Identity", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3129,24 +3298,27 @@ public static Tensor identity_eager_fallback(Tensor input, string name, Context
     /// 
     /// </remarks>
     /// <param name="input"></param>
-    /// <param name="T"></param>
     /// <returns></returns>
-    public static Tensor identity_n(Tensor input, TF_DataType[] T, string? name = null)
+    public static Tensor[] identity_n(Tensors input, string? name = null)
     {
         var _ctx = tf.Context;
         if (_ctx.executing_eagerly())
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityN", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["T"] = T } });
-                return _fast_path_result[0];
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityN", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
             }
             catch (Exception)
             {
             }
             try
             {
-                return identity_n_eager_fallback(input, T: T, name: name, ctx: _ctx);
+                return identity_n_eager_fallback(input, name: name, ctx: _ctx);
             }
             catch (Exception)
             {
@@ -3154,7 +3326,6 @@ public static Tensor identity_n(Tensor input, TF_DataType[] T, string? name = nu
         }
         Dictionary<string, object> keywords = new();
         keywords["input"] = input;
-        keywords["T"] = T;
         var _op = tf.OpDefLib._apply_op_helper("IdentityN", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
@@ -3162,19 +3333,19 @@ public static Tensor identity_n(Tensor input, TF_DataType[] T, string? name = nu
             object[] _attrs = new object[] { "T", _op.get_attr("T") };
             _execute.record_gradient("IdentityN", _op.inputs, _attrs, _result);
         }
-        return _result[0];
+        return _result;
     }
 
-    public static Tensor identity_n_eager_fallback(Tensor input, TF_DataType[] T, string name, Context ctx)
+    public static Tensor[] identity_n_eager_fallback(Tensor input, string name, Context ctx)
     {
         Tensor[] _inputs_flat = new Tensor[] { input };
-        object[] _attrs = new object[] { "T", T };
+        object[] _attrs = new object[] { };
         var _result = _execute.execute("IdentityN", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
         if (_execute.must_record_gradient())
         {
             _execute.record_gradient("IdentityN", _inputs_flat, _attrs, _result);
         }
-        return _result[0];
+        return _result;
     }
     /// <summary>
     /// Returns immutable tensor from memory region.
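identity_n above now takes a Tensors and returns every mirrored output instead of only the first, letting the runtime infer the T attribute from the inputs rather than requiring it as a parameter. A short usage sketch with illustrative constants:

    using Tensorflow;
    using static Tensorflow.Binding;

    var a = tf.constant(1);
    var b = tf.constant(2.0f);

    // One output per input; dtypes are taken from the inputs themselves.
    Tensor[] outs = gen_array_ops.identity_n(new[] { a, b });
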
@@ -3211,6 +3382,10 @@ public static Tensor immutable_const(TF_DataType dtype, Shape shape, string memo
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ImmutableConst", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype, ["shape"] = shape, ["memory_region_name"] = memory_region_name } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3264,6 +3439,10 @@ public static Tensor inplace_add(Tensor x, Tensor i, Tensor v, string? name = nu
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceAdd", name) { args = new object[] { x, i, v }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3317,6 +3496,10 @@ public static Tensor inplace_sub(Tensor x, Tensor i, Tensor v, string? name = nu
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceSub", name) { args = new object[] { x, i, v }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3370,6 +3553,10 @@ public static Tensor inplace_update(Tensor x, Tensor i, Tensor v, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InplaceUpdate", name) { args = new object[] { x, i, v }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3440,6 +3627,10 @@ public static Tensor invert_permutation(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvertPermutation", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3516,6 +3707,10 @@ public static Tensor[] list_diff(Tensor x, Tensor y, TF_DataType out_idx = TF_Da
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ListDiff", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["out_idx"] = out_idx } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3590,6 +3785,10 @@ public static Tensor lower_bound(Tensor sorted_inputs, Tensor values, TF_DataTyp
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LowerBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3684,6 +3883,10 @@ public static Tensor matrix_band_part(Tensor input, Tensor num_lower, Tensor num
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixBandPart", name) { args = new object[] { input, num_lower, num_upper }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3765,6 +3968,10 @@ public static Tensor matrix_diag(Tensor diagonal, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiag", name) { args = new object[] { diagonal }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3846,6 +4053,10 @@ public static Tensor matrix_diag_part(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPart", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3969,6 +4180,10 @@ public static Tensor matrix_diag_part_v2(Tensor input, Tensor k, Tensor padding_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV2", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4136,6 +4351,10 @@ public static Tensor matrix_diag_part_v3(Tensor input, Tensor k, Tensor padding_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagPartV3", name) { args = new object[] { input, k, padding_value }, attrs = new Dictionary<string, object>() { ["align"] = align } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4287,6 +4506,10 @@ public static Tensor matrix_diag_v2(Tensor diagonal, Tensor k, Tensor num_rows,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV2", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4475,6 +4698,10 @@ public static Tensor matrix_diag_v3(Tensor diagonal, Tensor k, Tensor num_rows,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixDiagV3", name) { args = new object[] { diagonal, k, num_rows, num_cols, padding_value }, attrs = new Dictionary<string, object>() { ["align"] = align } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4550,6 +4777,10 @@ public static Tensor matrix_set_diag(Tensor input, Tensor diagonal, string? name
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiag", name) { args = new object[] { input, diagonal }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4677,6 +4908,10 @@ public static Tensor matrix_set_diag_v2(Tensor input, Tensor diagonal, Tensor k,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV2", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4849,6 +5084,10 @@ public static Tensor matrix_set_diag_v3(Tensor input, Tensor diagonal, Tensor k,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatrixSetDiagV3", name) { args = new object[] { input, diagonal, k }, attrs = new Dictionary<string, object>() { ["align"] = align } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4944,6 +5183,10 @@ public static Tensor mirror_pad(Tensor input, Tensor paddings, string mode, stri
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPad", name) { args = new object[] { input, paddings }, attrs = new Dictionary<string, object>() { ["mode"] = mode } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5023,6 +5266,10 @@ public static Tensor mirror_pad_grad(Tensor input, Tensor paddings, string mode,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MirrorPadGrad", name) { args = new object[] { input, paddings }, attrs = new Dictionary<string, object>() { ["mode"] = mode } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5173,6 +5420,10 @@ public static Tensor one_hot(Tensor indices, Tensor depth, Tensor on_value, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OneHot", name) { args = new object[] { indices, depth, on_value, off_value }, attrs = new Dictionary<string, object>() { ["axis"] = axis } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5226,6 +5477,10 @@ public static Tensor ones_like(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "OnesLike", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5304,6 +5559,10 @@ public static Tensor pack(Tensors values, int axis = 0, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pack", name) { args = new object[] { values }, attrs = new Dictionary<string, object>() { ["axis"] = axis } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5384,6 +5643,10 @@ public static Tensor pad(Tensor input, Tensor paddings, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pad", name) { args = new object[] { input, paddings }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5464,6 +5727,10 @@ public static Tensor pad_v2(Tensor input, Tensor paddings, Tensor constant_value
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PadV2", name) { args = new object[] { input, paddings, constant_values }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5541,6 +5808,10 @@ public static Tensor parallel_concat(Tensors values, Shape shape, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ParallelConcat", name) { args = new object[] { values }, attrs = new Dictionary<string, object>() { ["shape"] = shape } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5610,6 +5881,10 @@ public static Tensor placeholder(TF_DataType dtype, Shape shape = null, string?
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Placeholder", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype, ["shape"] = shape } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5677,6 +5952,10 @@ public static Tensor placeholder_v2(TF_DataType dtype, Shape shape, string? name
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderV2", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype, ["shape"] = shape } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5732,6 +6011,10 @@ public static Tensor placeholder_with_default(Tensor input, Shape shape, string?
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PlaceholderWithDefault", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["shape"] = shape } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5799,6 +6082,10 @@ public static Tensor prevent_gradient(Tensor input, string message = "", string?
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PreventGradient", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["message"] = message } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5858,6 +6145,10 @@ public static Tensor quantize_and_dequantize(Tensor input, bool signed_input = t
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantize", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["input_min"] = input_min, ["input_max"] = input_max } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6011,6 +6302,10 @@ public static Tensor quantize_and_dequantize_v2(Tensor input, Tensor input_min,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV2", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary<string, object>() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6085,6 +6380,10 @@ public static Tensor quantize_and_dequantize_v3(Tensor input, Tensor input_min,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV3", name) { args = new object[] { input, input_min, input_max, num_bits }, attrs = new Dictionary<string, object>() { ["signed_input"] = signed_input, ["range_given"] = range_given, ["narrow_range"] = narrow_range, ["axis"] = axis } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6190,6 +6489,10 @@ public static Tensor quantize_and_dequantize_v4(Tensor input, Tensor input_min,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeAndDequantizeV4", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary<string, object>() { ["signed_input"] = signed_input, ["num_bits"] = num_bits, ["range_given"] = range_given, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6387,6 +6690,10 @@ public static Tensor[] quantize_v2(Tensor input, Tensor min_range, Tensor max_ra
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeV2", name) { args = new object[] { input, min_range, max_range }, attrs = new Dictionary<string, object>() { ["T"] = T, ["mode"] = mode, ["round_mode"] = round_mode, ["narrow_range"] = narrow_range, ["axis"] = axis, ["ensure_minimum_range"] = ensure_minimum_range } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6455,6 +6762,10 @@ public static Tensor[] quantized_concat(Tensor concat_dim, Tensors values, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConcat", name) { args = new object[] { concat_dim, values, input_mins, input_maxes }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6541,6 +6852,10 @@ public static Tensor[] quantized_instance_norm(Tensor x, Tensor x_min, Tensor x_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedInstanceNorm", name) { args = new object[] { x, x_min, x_max }, attrs = new Dictionary<string, object>() { ["output_range_given"] = output_range_given, ["given_y_min"] = given_y_min, ["given_y_max"] = given_y_max, ["variance_epsilon"] = variance_epsilon, ["min_separation"] = min_separation } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6605,6 +6920,10 @@ public static Tensor[] quantized_reshape(Tensor tensor, Tensor shape, Tensor inp
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReshape", name) { args = new object[] { tensor, shape, input_min, input_max }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6674,6 +6993,10 @@ public static Tensor rank(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rank", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6815,6 +7138,10 @@ public static Tensor reshape(Tensor tensor, Tensor shape, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reshape", name) { args = new object[] { tensor, shape }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6884,6 +7211,10 @@ public static Operation resource_strided_slice_assign(Tensor ref_, Tensor begin,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceStridedSliceAssign", name) { args = new object[] { ref_, begin, end, strides, value }, attrs = new Dictionary<string, object>() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6991,6 +7322,10 @@ public static Tensor reverse(Tensor tensor, Tensor dims, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reverse", name) { args = new object[] { tensor, dims }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7110,6 +7445,10 @@ public static Tensor reverse_sequence(Tensor input, Tensor seq_lengths, int seq_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReverseSequence", name) { args = new object[] { input, seq_lengths }, attrs = new Dictionary<string, object>() { ["seq_dim"] = seq_dim, ["batch_dim"] = batch_dim } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7210,6 +7549,10 @@ public static Tensor reverse_v2(Tensor tensor, Tensor axis, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReverseV2", name) { args = new object[] { tensor, axis }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7352,6 +7695,10 @@ public static Tensor scatter_nd(Tensor indices, Tensor updates, Tensor shape, st
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ScatterNd", name) { args = new object[] { indices, updates, shape }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7442,6 +7789,10 @@ public static Tensor scatter_nd_non_aliasing_add(Tensor input, Tensor indices, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ScatterNdNonAliasingAdd", name) { args = new object[] { input, indices, updates }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7506,6 +7857,10 @@ public static Tensor shape(Tensor input, TF_DataType out_type = TF_DataType.TF_I
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Shape", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7562,6 +7917,10 @@ public static Tensor[] shape_n(Tensors input, TF_DataType out_type = TF_DataType
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShapeN", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7628,6 +7987,10 @@ public static Tensor size(Tensor input, TF_DataType out_type = TF_DataType.TF_IN
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Size", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7690,6 +8053,10 @@ public static Tensor slice(Tensor input, Tensor begin, Tensor size, string? name
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Slice", name) { args = new object[] { input, begin, size }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7741,6 +8108,10 @@ public static Tensor snapshot(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Snapshot", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7879,6 +8250,10 @@ public static Tensor space_to_batch(Tensor input, Tensor paddings, int block_siz
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToBatch", name) { args = new object[] { input, paddings }, attrs = new Dictionary<string, object>() { ["block_size"] = block_size } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8048,6 +8423,10 @@ public static Tensor space_to_batch_nd(Tensor input, Tensor block_shape, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToBatchND", name) { args = new object[] { input, block_shape, paddings }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8192,6 +8571,10 @@ public static Tensor space_to_depth(Tensor input, int block_size = 0, string dat
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SpaceToDepth", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["block_size"] = block_size, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8254,6 +8637,10 @@ public static Tensor[] split(Tensor split_dim, Tensor value, int num_split = 0,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Split", name) { args = new object[] { split_dim, value }, attrs = new Dictionary<string, object>() { ["num_split"] = num_split } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8308,6 +8695,10 @@ public static Tensor[] split_v(Tensor value, Tensor size_splits, Tensor split_di
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SplitV", name) { args = new object[] { value, size_splits, split_dim }, attrs = new Dictionary<string, object>() { ["num_split"] = num_split } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8393,6 +8784,10 @@ public static Tensor squeeze(Tensor input, int[] squeeze_dims = null, string? na
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Squeeze", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["squeeze_dims"] = squeeze_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8504,6 +8899,10 @@ public static Tensor stop_gradient(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StopGradient", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8689,6 +9088,10 @@ public static Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StridedSlice", name) { args = new object[] { input, begin, end, strides }, attrs = new Dictionary<string, object>() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8823,6 +9226,10 @@ public static Tensor strided_slice_grad(Tensor shape, Tensor begin, Tensor end,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StridedSliceGrad", name) { args = new object[] { shape, begin, end, strides, dy }, attrs = new Dictionary<string, object>() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8946,6 +9353,10 @@ public static Tensor tensor_scatter_add(Tensor tensor, Tensor indices, Tensor up
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterAdd", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9013,6 +9424,10 @@ public static Tensor tensor_scatter_max(Tensor tensor, Tensor indices, Tensor up
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterMax", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9066,6 +9481,10 @@ public static Tensor tensor_scatter_min(Tensor tensor, Tensor indices, Tensor up
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterMin", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9185,6 +9604,10 @@ public static Tensor tensor_scatter_sub(Tensor tensor, Tensor indices, Tensor up
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterSub", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9278,6 +9701,10 @@ public static Tensor tensor_scatter_update(Tensor tensor, Tensor indices, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorScatterUpdate", name) { args = new object[] { tensor, indices, updates }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9348,6 +9775,10 @@ public static Tensor tensor_strided_slice_update(Tensor input, Tensor begin, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorStridedSliceUpdate", name) { args = new object[] { input, begin, end, strides, value }, attrs = new Dictionary<string, object>() { ["begin_mask"] = begin_mask, ["end_mask"] = end_mask, ["ellipsis_mask"] = ellipsis_mask, ["new_axis_mask"] = new_axis_mask, ["shrink_axis_mask"] = shrink_axis_mask } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9437,6 +9868,10 @@ public static Tensor tile(Tensor input, Tensor multiples, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tile", name) { args = new object[] { input, multiples }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9495,6 +9930,10 @@ public static Tensor tile_grad(Tensor input, Tensor multiples, string? name = nu
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TileGrad", name) { args = new object[] { input, multiples }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9552,6 +9991,10 @@ public static Tensor transpose(Tensor x, Tensor perm, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Transpose", name) { args = new object[] { x, perm }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9629,6 +10072,10 @@ public static Tensor[] unique(Tensor x, TF_DataType out_idx = TF_DataType.TF_INT
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Unique", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { ["out_idx"] = out_idx } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9728,6 +10175,10 @@ public static Tensor[] unique_v2(Tensor x, Tensor axis, TF_DataType out_idx = TF
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueV2", name) { args = new object[] { x, axis }, attrs = new Dictionary<string, object>() { ["out_idx"] = out_idx } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9801,6 +10252,10 @@ public static Tensor[] unique_with_counts(Tensor x, TF_DataType out_idx = TF_Dat
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueWithCounts", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { ["out_idx"] = out_idx } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9904,6 +10359,10 @@ public static Tensor[] unique_with_counts_v2(Tensor x, Tensor axis, TF_DataType
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UniqueWithCountsV2", name) { args = new object[] { x, axis }, attrs = new Dictionary<string, object>() { ["out_idx"] = out_idx } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9978,6 +10437,10 @@ public static Tensor[] unpack(Tensor value, int num = 0, int axis = 0, string? n
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Unpack", name) { args = new object[] { value }, attrs = new Dictionary<string, object>() { ["num"] = num, ["axis"] = axis } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -10054,6 +10517,10 @@ public static Tensor unravel_index(Tensor indices, Tensor dims, string? name = n
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnravelIndex", name) { args = new object[] { indices, dims }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -10127,6 +10594,10 @@ public static Tensor upper_bound(Tensor sorted_inputs, Tensor values, TF_DataTyp
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UpperBound", name) { args = new object[] { sorted_inputs, values }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -10241,6 +10712,10 @@ public static Tensor where(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Where", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -10290,6 +10765,10 @@ public static Tensor zeros_like(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ZerosLike", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
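
Every hunk above adds the same two-stage dispatch to the eager fast path: a NotOkStatusException coming back from the runtime is rethrown to the caller, while any other failure is swallowed so execution can fall through to the eager fallback or the graph-mode builder. A minimal standalone sketch of that control flow follows; the types and helpers in it (the local NotOkStatusException stub, FastPath, GraphFallback, Execute) are stand-ins invented for illustration, not the real Tensorflow.Exceptions or runner classes.

using System;

// Local stub standing in for Tensorflow.Exceptions.NotOkStatusException (assumption for this sketch).
class NotOkStatusException : Exception
{
    public NotOkStatusException(string message) : base(message) { }
}

static class FastPathDispatchSketch
{
    // Pretend eager fast path: it may fail with a runtime status error
    // or with a "this path is not applicable" error.
    static int FastPath(bool statusError, bool otherError)
    {
        if (statusError) throw new NotOkStatusException("bad status from the runtime");
        if (otherError) throw new InvalidOperationException("fast path not applicable");
        return 42;
    }

    // Pretend graph-mode route (the _apply_op_helper path in the generated wrappers).
    static int GraphFallback() => 7;

    public static int Execute(bool statusError = false, bool otherError = false)
    {
        try
        {
            return FastPath(statusError, otherError);
        }
        catch (NotOkStatusException)
        {
            throw; // a real runtime failure: surface it instead of silently falling back
        }
        catch (Exception)
        {
            // any other failure just means the fast path could not be used
        }
        return GraphFallback();
    }

    static void Main()
    {
        Console.WriteLine(Execute());                   // 42: fast path succeeded
        Console.WriteLine(Execute(otherError: true));   // 7:  fell back to the graph-mode path
        // Execute(statusError: true) would propagate the NotOkStatusException to the caller.
    }
}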
diff --git a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs
index 5663f9c97..6ec426f58 100644
--- a/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_functional_ops.cs
@@ -1,128 +1,1089 @@
-using System;
-using System.Collections.Generic;
-using System.Text;
-using System.Xml.Linq;
-using Tensorflow.Contexts;
+/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/
+
 using Tensorflow.Eager;
-using Tensorflow.Functions;
+using Tensorflow.Contexts;
+using Tensorflow.Exceptions;
 using static Tensorflow.Binding;
 
-namespace Tensorflow.Operations
+namespace Tensorflow;
+
+public static class gen_functional_ops
 {
-    public class gen_functional_ops
+    /// <summary>
+    /// An n-way switch statement which calls a single branch function.
+    /// </summary>
+    /// <remarks>
+    /// 
+    ///     An n-way switch statement, implementing the following:
+    ///     ```
+    ///     switch (branch_index) {
+    ///       case 0:
+    ///         output = branches[0](input);
+    ///         break;
+    ///       case 1:
+    ///         output = branches[1](input);
+    ///         break;
+    ///       ...
+    ///       case [[nbranches-1]]:
+    ///       default:
+    ///         output = branches[nbranches-1](input);
+    ///         break;
+    ///     }
+    ///     ```
+    /// 
+    /// </remarks>
+    /// <param name="branch_index"></param>
+    /// <param name="input"></param>
+    /// <param name="Tout">
+    /// A list of output types.
+    /// </param>
+    /// <param name="branches">
+    /// 
+    ///       A list of functions each of which takes 'inputs' and returns a list of
+    ///       tensors, whose types are the same as what every other branch returns.
+    /// 
+    /// </param>
+    /// <param name="output_shapes"></param>
+    /// <returns></returns>
+    public static Tensor[] _case(Tensor branch_index, Tensors input, TF_DataType[] Tout, object[] branches, Shape[] output_shapes, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Case", name) { args = new object[] { branch_index, input }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["branches"] = branches, ["output_shapes"] = output_shapes } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return case_eager_fallback(branch_index, input, Tout: Tout, branches: branches, output_shapes: output_shapes, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["branch_index"] = branch_index;
+        keywords["input"] = input;
+        keywords["Tout"] = Tout;
+        keywords["branches"] = branches;
+        keywords["output_shapes"] = output_shapes;
+        var _op = tf.OpDefLib._apply_op_helper("Case", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "branches", _op.get_attr("branches"), "output_shapes", _op.get_attr("output_shapes") };
+            _execute.record_gradient("Case", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
+
+    public static Tensor[] case_eager_fallback(Tensor branch_index, Tensor input, TF_DataType[] Tout, object[] branches, Shape[] output_shapes, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { branch_index, input };
+        object[] _attrs = new object[] { "branches", branches, "output_shapes", output_shapes };
+        var _result = _execute.execute("Case", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("Case", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// Return the index of device the op runs.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Given a list of device names, this operation returns the index of the device
+    /// this op runs. The length of the list is returned in two cases:
+    /// (1) Device does not exist in the given device list.
+    /// (2) It is in XLA compilation.
+    /// 
+    /// </remarks>
+    /// <param name="device_names"></param>
+    /// <returns></returns>
+    public static Tensor device_index(string[] device_names, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DeviceIndex", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["device_names"] = device_names } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return device_index_eager_fallback(device_names: device_names, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["device_names"] = device_names;
+        var _op = tf.OpDefLib._apply_op_helper("DeviceIndex", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "device_names", _op.get_attr("device_names") };
+            _execute.record_gradient("DeviceIndex", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor device_index_eager_fallback(string[] device_names, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { };
+        object[] _attrs = new object[] { "device_names", device_names };
+        var _result = _execute.execute("DeviceIndex", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("DeviceIndex", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// This op is used as a placeholder in If branch functions. It doesn't provide a
+    /// valid output when run, so must either be removed (e.g. replaced with a
+    /// function input) or guaranteed not to be used (e.g. if mirroring an
+    /// intermediate output needed for the gradient computation of the other branch).
+    /// </summary>
+    /// <param name="dtype">
+    /// The type of the output.
+    /// </param>
+    /// <param name="shape">
+    /// 
+    ///     The purported shape of the output. This is only used for shape inference;
+    ///     the output will not necessarily have this shape. Can be a partial shape.
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Tensor fake_param(TF_DataType dtype, Shape shape, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FakeParam", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype, ["shape"] = shape } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return fake_param_eager_fallback(dtype: dtype, shape: shape, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["dtype"] = dtype;
+        keywords["shape"] = shape;
+        var _op = tf.OpDefLib._apply_op_helper("FakeParam", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape") };
+            _execute.record_gradient("FakeParam", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor fake_param_eager_fallback(TF_DataType dtype, Shape shape, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { };
+        object[] _attrs = new object[] { "dtype", dtype, "shape", shape };
+        var _result = _execute.execute("FakeParam", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("FakeParam", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Applies a for loop.
+    /// </summary>
+    /// <remarks>
+    /// 
+    ///   ```python
+    ///    output = input;
+    ///    for i in range(start, limit, delta)
+    ///      output = body(i, output);
+    ///   ```
+    /// 
+    /// </remarks>
+    /// <param name="start"></param>
+    /// <param name="limit"></param>
+    /// <param name="delta"></param>
+    /// <param name="input"></param>
+    /// <param name="body">
+    /// 
+    ///     A function that takes a list of tensors (int32, T) and returns another
+    ///     list of tensors (T).
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Tensor[] _for(Tensor start, Tensor limit, Tensor delta, Tensors input, object body, string? name = null)
     {
-        public static Tensor[] partitioned_call(Tensors args, TF_DataType[] tout, EagerDefinedFunction f, 
-            string config = "", string config_proto = "", string executor_type = "", string name = null)
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
         {
-            var ctx = tf.Context;
-            if (ctx.executing_eagerly())
+            try
             {
-                try
-                {
-                    return tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "PartitionedCall", name,
-                        args, tout, f, config, config_proto, executor_type));
-                }
-                catch (Exception)
-                {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "For", name) { args = new object[] { start, limit, delta, input }, attrs = new Dictionary<string, object>() { ["body"] = body } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return for_eager_fallback(start, limit, delta, input, body: body, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["start"] = start;
+        keywords["limit"] = limit;
+        keywords["delta"] = delta;
+        keywords["input"] = input;
+        keywords["body"] = body;
+        var _op = tf.OpDefLib._apply_op_helper("For", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "T", _op.get_attr("T"), "body", _op.get_attr("body") };
+            _execute.record_gradient("For", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
 
-                }
+    public static Tensor[] for_eager_fallback(Tensor start, Tensor limit, Tensor delta, Tensor input, object body, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { start, limit, delta, input };
+        object[] _attrs = new object[] { "body", body };
+        var _result = _execute.execute("For", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("For", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
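// Illustrative aside (RunFor and ForSemanticsSketch are made-up names, not APIs from this
// repository): the "For" remarks above describe
//   output = input; for i in range(start, limit, delta): output = body(i, output)
// replayed here with plain ints instead of Tensors, assuming a positive delta as with
// Python's range() with a positive step.
using System;

static class ForSemanticsSketch
{
    public static int RunFor(int start, int limit, int delta, int input, Func<int, int, int> body)
    {
        var output = input;
        for (var i = start; i < limit; i += delta)
            output = body(i, output);   // each iteration feeds the previous output back into body
        return output;
    }

    static void Main() =>
        // accumulates 0 + 1 + 2 + 3 + 4 through body: prints 10
        Console.WriteLine(RunFor(start: 0, limit: 5, delta: 1, input: 0, body: (i, acc) => acc + i));
}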
+    /// <summary>
+    /// output = cond ? then_branch(input) : else_branch(input)
+    /// </summary>
+    /// <param name="cond"></param>
+    /// <param name="input"></param>
+    /// <param name="Tout">
+    /// A list of output types.
+    /// </param>
+    /// <param name="then_branch">
+    /// 
+    ///       A function that takes 'inputs' and returns a list of tensors, whose
+    ///       types are the same as what else_branch returns.
+    /// 
+    /// </param>
+    /// <param name="else_branch">
+    /// 
+    ///     A function that takes 'inputs' and returns a list of tensors, whose
+    ///     types are the same as what then_branch returns.
+    /// 
+    /// </param>
+    /// <param name="output_shapes"></param>
+    /// <returns></returns>
+    public static Tensor[] _if(Tensor cond, Tensors input, TF_DataType[] Tout, object then_branch, object else_branch, Shape[] output_shapes, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "If", name) { args = new object[] { cond, input }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["then_branch"] = then_branch, ["else_branch"] = else_branch, ["output_shapes"] = output_shapes } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return if_eager_fallback(cond, input, Tout: Tout, then_branch: then_branch, else_branch: else_branch, output_shapes: output_shapes, name: name, ctx: _ctx);
             }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["cond"] = cond;
+        keywords["input"] = input;
+        keywords["Tout"] = Tout;
+        keywords["then_branch"] = then_branch;
+        keywords["else_branch"] = else_branch;
+        keywords["output_shapes"] = output_shapes;
+        var _op = tf.OpDefLib._apply_op_helper("If", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Tcond", _op._get_attr_type("Tcond"), "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "then_branch", _op.get_attr("then_branch"), "else_branch", _op.get_attr("else_branch"), "output_shapes", _op.get_attr("output_shapes") };
+            _execute.record_gradient("If", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
 
-            if (config is null)
+    public static Tensor[] if_eager_fallback(Tensor cond, Tensor input, TF_DataType[] Tout, object then_branch, object else_branch, Shape[] output_shapes, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { cond, input };
+        object[] _attrs = new object[] { "Tcond", cond.dtype, "then_branch", then_branch, "else_branch", else_branch, "output_shapes", output_shapes };
+        var _result = _execute.execute("If", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("If", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// returns `f(inputs)`, where `f`'s body is placed and partitioned.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Asynchronously executes a function, potentially across multiple devices but
+    /// within a single process. The kernel places and partitions a given function's
+    /// underlying graph, and executes each of the partitioned subgraphs as a function.
+    /// 
+    /// </remarks>
+    /// <param name="args"></param>
+    /// <param name="Tout">
+    /// A list of output types.
+    /// </param>
+    /// <param name="f">
+    /// 
+    ///       A function that takes 'args', a list of tensors, and returns 'output',
+    ///       another list of tensors. Input and output types are specified by 'Tin'
+    ///       and 'Tout'. The function body of f will be placed and partitioned across
+    ///       devices, setting this op apart from the regular Call op.
+    /// 
+    /// </param>
+    /// <param name="config"></param>
+    /// <param name="config_proto"></param>
+    /// <param name="executor_type"></param>
+    /// <returns></returns>
+    public static Tensor[] partitioned_call(Tensors args, TF_DataType[] Tout, object f, string config = "", string config_proto = "", string executor_type = "", string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "PartitionedCall", name) { args = new object[] { args }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["f"] = f, ["config"] = config, ["config_proto"] = config_proto, ["executor_type"] = executor_type } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return partitioned_call_eager_fallback(args, Tout: Tout, f: f, config: config, config_proto: config_proto, executor_type: executor_type, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        if (config is null)
+        {
+            config = "";
+        }
+        if (config_proto is null)
+        {
+            config_proto = "";
+        }
+        if (executor_type is null)
+        {
+            executor_type = "";
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["args"] = args;
+        keywords["Tout"] = Tout;
+        keywords["f"] = f;
+        keywords["config"] = config;
+        keywords["config_proto"] = config_proto;
+        keywords["executor_type"] = executor_type;
+        var _op = tf.OpDefLib._apply_op_helper("PartitionedCall", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", _op.get_attr("f"), "config", _op.get_attr("config"), "config_proto", _op.get_attr("config_proto"), "executor_type", _op.get_attr("executor_type") };
+            _execute.record_gradient("PartitionedCall", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
+
+    public static Tensor[] partitioned_call_eager_fallback(Tensor args, TF_DataType[] Tout, object f, string config, string config_proto, string executor_type, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { args };
+        object[] _attrs = new object[] { "f", f, "config", config, "config_proto", config_proto, "executor_type", executor_type };
+        var _result = _execute.execute("PartitionedCall", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("PartitionedCall", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// Runs function `f` on a remote device indicated by `target`.
+    /// </summary>
+    /// <param name="target"></param>
+    /// <param name="args"></param>
+    /// <param name="Tout">
+    /// 
+    /// The type list for the return values.
+    /// 
+    /// </param>
+    /// <param name="f">
+    /// 
+    /// The function to run remotely.
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Tensor[] remote_call(Tensor target, Tensors args, TF_DataType[] Tout, object f, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RemoteCall", name) { args = new object[] { target, args }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["f"] = f } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
             {
-                config = "";
+                throw ex;
             }
-            if (config_proto is null)
+            catch (Exception)
             {
-                config_proto = "";
             }
-            if (executor_type is null)
+            try
             {
-                executor_type = "";
+                return remote_call_eager_fallback(target, args, Tout: Tout, f: f, name: name, ctx: _ctx);
             }
-            Dictionary<string, object> kwargs = new();
-            kwargs["args"] = args;
-            kwargs["Tout"] = tout;
-            kwargs["f"] = f;
-            kwargs["config"] = config;
-            kwargs["config_proto"] = config_proto;
-            kwargs["executor_type"] = executor_type;
-            var output = tf.OpDefLib._apply_op_helper("PartitionedCall",
-                name, kwargs);
-            var result = output.outputs;
-            if (_execute.must_record_gradient())
+            catch (Exception)
             {
-                throw new NotImplementedException();
             }
-            return result;
         }
+        Dictionary<string, object> keywords = new();
+        keywords["target"] = target;
+        keywords["args"] = args;
+        keywords["Tout"] = Tout;
+        keywords["f"] = f;
+        var _op = tf.OpDefLib._apply_op_helper("RemoteCall", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", _op.get_attr("f") };
+            _execute.record_gradient("RemoteCall", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
 
-        public static Tensor[] partitioned_call_eager_fallback(Tensors args, TF_DataType[] tout, EagerDefinedFunction f,
-            string config, string config_proto, string executor_type, string name, Context ctx)
+    public static Tensor[] remote_call_eager_fallback(Tensor target, Tensor args, TF_DataType[] Tout, object f, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { target, args };
+        object[] _attrs = new object[] { "f", f };
+        var _result = _execute.execute("RemoteCall", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
         {
-            // TODO(Rinne): implement it.
-            throw new NotImplementedException();
-            if(config is null)
+            _execute.record_gradient("RemoteCall", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// returns `f(inputs)`, where `f`'s body is placed and partitioned.
+    /// </summary>
+    /// <param name="args"></param>
+    /// <param name="Tout">
+    /// A list of output types.
+    /// </param>
+    /// <param name="f">
+    /// 
+    ///       A function that takes 'args', a list of tensors, and returns 'output',
+    ///       another list of tensors. Input and output types are specified by 'Tin'
+    ///       and 'Tout'. The function body of f will be placed and partitioned across
+    ///       devices, setting this op apart from the regular Call op. This op is
+    ///       stateful.
+    /// 
+    /// </param>
+    /// <param name="config"></param>
+    /// <param name="config_proto"></param>
+    /// <param name="executor_type"></param>
+    /// <returns></returns>
+    public static Tensor[] stateful_partitioned_call(Tensors args, TF_DataType[] Tout, object f, string config = "", string config_proto = "", string executor_type = "", string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
             {
-                config = "";
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatefulPartitionedCall", name) { args = new object[] { args }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["f"] = f, ["config"] = config, ["config_proto"] = config_proto, ["executor_type"] = executor_type } });
+                return _fast_path_result;
             }
-            if(config_proto is null)
+            catch (NotOkStatusException ex)
             {
-                config_proto = "";
+                throw ex;
             }
-            if(executor_type is null)
+            catch (Exception)
             {
-                executor_type = "";
             }
-            object[] attrs = new object[]
+            try
             {
+                return stateful_partitioned_call_eager_fallback(args, Tout: Tout, f: f, config: config, config_proto: config_proto, executor_type: executor_type, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        if (config is null)
+        {
+            config = "";
+        }
+        if (config_proto is null)
+        {
+            config_proto = "";
+        }
+        if (executor_type is null)
+        {
+            executor_type = "";
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["args"] = args;
+        keywords["Tout"] = Tout;
+        keywords["f"] = f;
+        keywords["config"] = config;
+        keywords["config_proto"] = config_proto;
+        keywords["executor_type"] = executor_type;
+        var _op = tf.OpDefLib._apply_op_helper("StatefulPartitionedCall", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", _op.get_attr("f"), "config", _op.get_attr("config"), "config_proto", _op.get_attr("config_proto"), "executor_type", _op.get_attr("executor_type") };
+            _execute.record_gradient("StatefulPartitionedCall", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
 
-            };
+    public static Tensor[] stateful_partitioned_call_eager_fallback(Tensor args, TF_DataType[] Tout, object f, string config, string config_proto, string executor_type, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { args };
+        object[] _attrs = new object[] { "f", f, "config", config, "config_proto", config_proto, "executor_type", executor_type };
+        var _result = _execute.execute("StatefulPartitionedCall", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("StatefulPartitionedCall", _inputs_flat, _attrs, _result);
         }
+        return _result;
+    }
+    /// <summary>
+    /// An n-way switch statement which calls a single branch function.
+    /// </summary>
+    /// <remarks>
+    /// 
+    ///     An n-way switch statement, implementing the following:
+    ///     ```
+    ///     switch (branch_index) {
+    ///       case 0:
+    ///         output = branches[0](input);
+    ///         break;
+    ///       case 1:
+    ///         output = branches[1](input);
+    ///         break;
+    ///       ...
+    ///       case [[nbranches-1]]:
+    ///       default:
+    ///         output = branches[nbranches-1](input);
+    ///         break;
+    ///     }
+    ///     ```
+    /// 
+    ///     This should only be used when none of the branches has stateful ops.
+    /// 
+    /// </remarks>
+    /// <param name="branch_index"></param>
+    /// <param name="input"></param>
+    /// <param name="Tout">
+    /// A list of output types.
+    /// </param>
+    /// <param name="branches">
+    /// 
+    ///       A list of functions each of which takes 'inputs' and returns a list of
+    ///       tensors, whose types are the same as what every other branch returns.
+    /// 
+    /// </param>
+    /// <param name="output_shapes"></param>
+    /// <returns></returns>
+    public static Tensor[] stateless_case(Tensor branch_index, Tensors input, TF_DataType[] Tout, object[] branches, Shape[] output_shapes, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessCase", name) { args = new object[] { branch_index, input }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["branches"] = branches, ["output_shapes"] = output_shapes } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return stateless_case_eager_fallback(branch_index, input, Tout: Tout, branches: branches, output_shapes: output_shapes, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["branch_index"] = branch_index;
+        keywords["input"] = input;
+        keywords["Tout"] = Tout;
+        keywords["branches"] = branches;
+        keywords["output_shapes"] = output_shapes;
+        var _op = tf.OpDefLib._apply_op_helper("StatelessCase", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "branches", _op.get_attr("branches"), "output_shapes", _op.get_attr("output_shapes") };
+            _execute.record_gradient("StatelessCase", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
 
-        public static Tensor[] symbolic_gradient(Tensor[] input, TF_DataType[] Tout, NameAttrList f, string name = null)
+    public static Tensor[] stateless_case_eager_fallback(Tensor branch_index, Tensor input, TF_DataType[] Tout, object[] branches, Shape[] output_shapes, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { branch_index, input };
+        object[] _attrs = new object[] { "branches", branches, "output_shapes", output_shapes };
+        var _result = _execute.execute("StatelessCase", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
         {
-            var ctx = tf.Context;
-            if (ctx.executing_eagerly())
+            _execute.record_gradient("StatelessCase", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// output = cond ? then_branch(input) : else_branch(input)
+    /// </summary>
+    /// <param name="cond"></param>
+    /// <param name="input"></param>
+    /// <param name="Tout">
+    /// A list of output types.
+    /// </param>
+    /// <param name="then_branch">
+    /// 
+    ///       A function that takes 'inputs' and returns a list of tensors, whose
+    ///       types are the same as what else_branch returns.
+    /// 
+    /// </param>
+    /// <param name="else_branch">
+    /// 
+    ///     A function that takes 'inputs' and returns a list of tensors, whose
+    ///     types are the same as what then_branch returns.
+    /// 
+    /// </param>
+    /// <param name="output_shapes"></param>
+    /// <returns></returns>
+    public static Tensor[] stateless_if(Tensor cond, Tensors input, TF_DataType[] Tout, object then_branch, object else_branch, Shape[] output_shapes, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessIf", name) { args = new object[] { cond, input }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["then_branch"] = then_branch, ["else_branch"] = else_branch, ["output_shapes"] = output_shapes } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
             {
-                try
-                {
-                    var _result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(
-                    tf.Context, "SymbolicGradient", name, input, Tout, f));
-                    return _result;
-                }
-                catch (Exception)
-                {
+                return stateless_if_eager_fallback(cond, input, Tout: Tout, then_branch: then_branch, else_branch: else_branch, output_shapes: output_shapes, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["cond"] = cond;
+        keywords["input"] = input;
+        keywords["Tout"] = Tout;
+        keywords["then_branch"] = then_branch;
+        keywords["else_branch"] = else_branch;
+        keywords["output_shapes"] = output_shapes;
+        var _op = tf.OpDefLib._apply_op_helper("StatelessIf", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Tcond", _op._get_attr_type("Tcond"), "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "then_branch", _op.get_attr("then_branch"), "else_branch", _op.get_attr("else_branch"), "output_shapes", _op.get_attr("output_shapes") };
+            _execute.record_gradient("StatelessIf", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
 
-                }
+    public static Tensor[] stateless_if_eager_fallback(Tensor cond, Tensor input, TF_DataType[] Tout, object then_branch, object else_branch, Shape[] output_shapes, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { cond, input };
+        object[] _attrs = new object[] { "Tcond", cond.dtype, "then_branch", then_branch, "else_branch", else_branch, "output_shapes", output_shapes };
+        var _result = _execute.execute("StatelessIf", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("StatelessIf", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// output = input; While (Cond(output)) { output = Body(output) }
+    /// </summary>
+    /// <param name="input"></param>
+    /// <param name="cond">
+    /// 
+    ///       A function that takes 'input' and returns a tensor.  If the tensor is
+    ///       a non-boolean scalar, the scalar is converted to a boolean
+    ///       according to the following rule: if the scalar is a numerical
+    ///       value, non-zero means True and zero means False; if the scalar is
+    ///       a string, non-empty means True and empty means False. If the
+    ///       tensor is not a scalar, non-emptiness means True and emptiness
+    ///       means False.
+    /// 
+    ///       This should only be used when the while condition and body functions
+    ///       do not have stateful ops.
+    /// 
+    /// </param>
+    /// <param name="body">
+    /// 
+    ///       A function that takes a list of tensors and returns another
+    ///       list of tensors. Both lists have the same types as specified
+    ///       by T.
+    /// 
+    /// </param>
+    /// <param name="output_shapes"></param>
+    /// <param name="parallel_iterations"></param>
+    /// <returns></returns>
+    public static Tensor[] stateless_while(Tensors input, object cond, object body, Shape[] output_shapes, int parallel_iterations = 10, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "StatelessWhile", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["cond"] = cond, ["body"] = body, ["output_shapes"] = output_shapes, ["parallel_iterations"] = parallel_iterations } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return stateless_while_eager_fallback(input, cond: cond, body: body, output_shapes: output_shapes, parallel_iterations: parallel_iterations, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input"] = input;
+        keywords["cond"] = cond;
+        keywords["body"] = body;
+        keywords["output_shapes"] = output_shapes;
+        keywords["parallel_iterations"] = parallel_iterations;
+        var _op = tf.OpDefLib._apply_op_helper("StatelessWhile", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "T", _op.get_attr("T"), "cond", _op.get_attr("cond"), "body", _op.get_attr("body"), "output_shapes", _op.get_attr("output_shapes"), "parallel_iterations", _op._get_attr_int("parallel_iterations") };
+            _execute.record_gradient("StatelessWhile", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
 
-                try
-                {
-                    return symbolic_gradient_eager_fallback(input, Tout, f, name, ctx);
-                }
-                catch (Exception)
-                {
+    public static Tensor[] stateless_while_eager_fallback(Tensor input, object cond, object body, Shape[] output_shapes, int parallel_iterations, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input };
+        object[] _attrs = new object[] { "cond", cond, "body", body, "output_shapes", output_shapes, "parallel_iterations", parallel_iterations };
+        var _result = _execute.execute("StatelessWhile", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("StatelessWhile", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
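As a quick illustration of the loop semantics documented above, the Tensors-based overload of tf.while_loop (which lowers to these While/StatelessWhile kernels when tracing a graph) can be driven with a cond and body pair; a minimal sketch, assuming eager mode and the Tensors constructor taking params Tensor[]:

    // Count i from 0 up to 10 with the Tensors-based while_loop overload.
    var i = tf.constant(0);
    Func<Tensors, Tensor> cond = x => tf.less(x[0], tf.constant(10));          // loop while i < 10
    Func<Tensors, Tensors> body = x => new[] { tf.add(x[0], tf.constant(1)) }; // i = i + 1
    var result = tf.while_loop(cond, body, new Tensors(i));
    // result[0] now holds the final loop variable (10).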
+    /// <summary>
+    /// Computes the gradient function for function f via backpropagation.
+    /// </summary>
+    /// <param name="input"></param>
+    /// <param name="Tout">
+    /// 
+    /// the type list for the input list.
+    /// 
+    /// </param>
+    /// <param name="f">
+    /// 
+    /// The function we want to compute the gradient for.
+    /// 
+    /// The function 'f' must be a numerical function that takes N inputs and
+    /// produces M outputs. Its gradient function 'g', which is computed by
+    /// this SymbolicGradient op, is a function that takes N + M inputs and
+    /// produces N outputs.
+    /// 
+    /// I.e. if we have
+    ///    (y_1, y_2, ..., y_M) = f(x_1, x_2, ..., x_N),
+    /// then g is
+    ///    (dL/dx_1, dL/dx_2, ..., dL/dx_N) = g(x_1, x_2, ..., x_N,
+    ///                                         dL/dy_1, dL/dy_2, ..., dL/dy_M),
+    /// 
+    /// where L is a scalar-valued function of (x_1, x_2, ..., x_N) (e.g., the
+    /// loss function), and dL/dx_i is the partial derivative of L with respect
+    /// to x_i.
+    /// 
+    /// (Needs some math expert to say the comment above better.)
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Tensor[] symbolic_gradient(Tensors input, TF_DataType[] Tout, object f, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SymbolicGradient", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["f"] = f } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return symbolic_gradient_eager_fallback(input, Tout: Tout, f: f, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input"] = input;
+        keywords["Tout"] = Tout;
+        keywords["f"] = f;
+        var _op = tf.OpDefLib._apply_op_helper("SymbolicGradient", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "Tin", _op.get_attr("Tin"), "Tout", _op.get_attr("Tout"), "f", _op.get_attr("f") };
+            _execute.record_gradient("SymbolicGradient", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
 
-                }
+    public static Tensor[] symbolic_gradient_eager_fallback(Tensor input, TF_DataType[] Tout, object f, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input };
+        object[] _attrs = new object[] { "f", f };
+        // SymbolicGradient produces one output per entry in Tout.
+        var _result = _execute.execute("SymbolicGradient", Tout.Length, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("SymbolicGradient", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
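Restating the relationship above in conventional notation (an editorial sketch, not part of the generated code): for $f : \mathbb{R}^N \to \mathbb{R}^M$ with $(y_1,\dots,y_M) = f(x_1,\dots,x_N)$ and a scalar loss $L$, the gradient function is

    g(x_1,\dots,x_N,\; \tfrac{\partial L}{\partial y_1},\dots,\tfrac{\partial L}{\partial y_M})
      = \left(\tfrac{\partial L}{\partial x_1},\dots,\tfrac{\partial L}{\partial x_N}\right),
    \quad\text{where by the chain rule}\quad
    \frac{\partial L}{\partial x_i} = \sum_{j=1}^{M} \frac{\partial L}{\partial y_j}\,\frac{\partial y_j}{\partial x_i}.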
+    /// <summary>
+    /// Converts a tensor to a scalar predicate.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Converts a tensor to a scalar predicate with the following rules:
+    /// 
+    /// - For 0D tensors, truthiness is determined by comparing against a "zero"
+    ///   value. For numerical types it is the obvious zero. For strings it is the
+    ///   empty string.
+    /// 
+    /// - For >0D tensors, truthiness is determined by looking at the number of
+    ///   elements. If the tensor has zero elements, the result is false; otherwise
+    ///   the result is true.
+    /// 
+    /// This matches the behavior of If and While for determining if a tensor counts
+    /// as true/false for a branch condition.
+    /// 
+    /// </remarks>
+    /// <param name="input"></param>
+    /// <returns></returns>
+    public static Tensor to_bool(Tensor input, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ToBool", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
             }
-            var op = tf.OpDefLib._apply_op_helper("SymbolicGradient", name, new object[] { input, Tout, f });
-            var result = op.outputs;
-            if (_execute.must_record_gradient())
+            catch (NotOkStatusException ex)
             {
-                throw new NotImplementedException();
+                throw ex;
             }
-            return result;
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return to_bool_eager_fallback(input, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input"] = input;
+        var _op = tf.OpDefLib._apply_op_helper("ToBool", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "T", _op._get_attr_type("T") };
+            _execute.record_gradient("ToBool", _op.inputs, _attrs, _result);
         }
+        return _result[0];
+    }
 
-        public static Tensor[] symbolic_gradient_eager_fallback(Tensor[] input, TF_DataType[] Tout, NameAttrList f, string name, Context ctx)
+    public static Tensor to_bool_eager_fallback(Tensor input, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input };
+        object[] _attrs = new object[] { "T", input.dtype };
+        var _result = _execute.execute("ToBool", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
         {
-            object[] attrs = new object[] { "Tin", input, "Tout", Tout, "f", f };
-            var result = _execute.execute("SymbolicGradient", Tout.Length, input, attrs, ctx, name);
-            if (_execute.must_record_gradient())
+            _execute.record_gradient("ToBool", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
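A small sketch of the predicate rules above, assuming eager execution and that these generated helpers live in the gen_functional_ops class:

    // 0-D numeric tensors: non-zero => true, zero => false.
    var a = gen_functional_ops.to_bool(tf.constant(3));   // scalar bool tensor holding true
    var b = gen_functional_ops.to_bool(tf.constant(0));   // scalar bool tensor holding false
    // Higher-rank tensors: true iff the tensor has at least one element.
    var c = gen_functional_ops.to_bool(tf.constant(new[] { 1, 2, 3 }));   // true (3 elements)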
+    /// <summary>
+    /// output = input; While (Cond(output)) { output = Body(output) }
+    /// </summary>
+    /// <param name="input"></param>
+    /// <param name="cond">
+    /// 
+    ///       A function that takes 'input' and returns a tensor. If the tensor
+    ///       is a scalar of a non-boolean type, it is converted to a boolean
+    ///       according to the following rule: if the scalar is a numerical
+    ///       value, non-zero means True and zero means False; if the scalar is
+    ///       a string, non-empty means True and empty means False. If the
+    ///       tensor is not a scalar, it counts as True when it is non-empty and
+    ///       False otherwise.
+    /// 
+    /// </param>
+    /// <param name="body">
+    /// 
+    ///       A function that takes a list of tensors and returns another
+    ///       list of tensors. Both lists have the same types as specified
+    ///       by T.
+    /// 
+    /// </param>
+    /// <param name="output_shapes"></param>
+    /// <param name="parallel_iterations"></param>
+    /// <returns></returns>
+    public static Tensor[] _while(Tensors input, object cond, object body, Shape[] output_shapes, int parallel_iterations = 10, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "While", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["cond"] = cond, ["body"] = body, ["output_shapes"] = output_shapes, ["parallel_iterations"] = parallel_iterations } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return while_eager_fallback(input, cond: cond, body: body, output_shapes: output_shapes, parallel_iterations: parallel_iterations, name: name, ctx: _ctx);
+            }
+            catch (Exception)
             {
-                throw new NotImplementedException();
             }
-            return result;
         }
+        Dictionary<string, object> keywords = new();
+        keywords["input"] = input;
+        keywords["cond"] = cond;
+        keywords["body"] = body;
+        keywords["output_shapes"] = output_shapes;
+        keywords["parallel_iterations"] = parallel_iterations;
+        var _op = tf.OpDefLib._apply_op_helper("While", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "T", _op.get_attr("T"), "cond", _op.get_attr("cond"), "body", _op.get_attr("body"), "output_shapes", _op.get_attr("output_shapes"), "parallel_iterations", _op._get_attr_int("parallel_iterations") };
+            _execute.record_gradient("While", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
+
+    public static Tensor[] while_eager_fallback(Tensor input, object cond, object body, Shape[] output_shapes, int parallel_iterations, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input };
+        object[] _attrs = new object[] { "cond", cond, "body", body, "output_shapes", output_shapes, "parallel_iterations", parallel_iterations };
+        var _result = _execute.execute("While", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("While", _inputs_flat, _attrs, _result);
+        }
+        return _result;
     }
 }
diff --git a/src/TensorFlowNET.Core/Operations/gen_io_ops.cs b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs
index 490cb1880..0b92ff360 100644
--- a/src/TensorFlowNET.Core/Operations/gen_io_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_io_ops.cs
@@ -2,12 +2,50 @@
 
 using Tensorflow.Eager;
 using Tensorflow.Contexts;
+using Tensorflow.Exceptions;
 using static Tensorflow.Binding;
 
 namespace Tensorflow;
 
-internal static class gen_io_ops
+public static class gen_io_ops
 {
+    /// <summary>
+    /// A Reader that outputs fixed-length records from a file.
+    /// </summary>
+    /// <param name="header_bytes">
+    /// 
+    /// Number of bytes in the header, defaults to 0.
+    /// 
+    /// </param>
+    /// <param name="record_bytes">
+    /// 
+    /// Number of bytes in the record.
+    /// 
+    /// </param>
+    /// <param name="footer_bytes">
+    /// 
+    /// Number of bytes in the footer, defaults to 0.
+    /// 
+    /// </param>
+    /// <param name="hop_bytes">
+    /// 
+    /// Number of bytes to hop before each read. Default of 0 means using
+    /// record_bytes.
+    /// 
+    /// </param>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this reader is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this reader is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor fixed_length_record_reader(int header_bytes = 0, int record_bytes = 0, int footer_bytes = 0, int hop_bytes = 0, string container = "", string shared_name = "", string? name = null)
     {
         var _ctx = tf.Context;
@@ -15,9 +53,13 @@ public static Tensor fixed_length_record_reader(int header_bytes = 0, int record
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReader", name, "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReader", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["header_bytes"] = header_bytes, ["record_bytes"] = record_bytes, ["footer_bytes"] = footer_bytes, ["hop_bytes"] = hop_bytes, ["container"] = container, ["shared_name"] = shared_name } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -29,8 +71,22 @@ public static Tensor fixed_length_record_reader(int header_bytes = 0, int record
             {
             }
         }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
         Dictionary<string, object> keywords = new();
-        keywords["header_bytes"] = header_bytes; keywords["record_bytes"] = record_bytes; keywords["footer_bytes"] = footer_bytes; keywords["hop_bytes"] = hop_bytes; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReader", name, keywords);
+        keywords["header_bytes"] = header_bytes;
+        keywords["record_bytes"] = record_bytes;
+        keywords["footer_bytes"] = footer_bytes;
+        keywords["hop_bytes"] = hop_bytes;
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReader", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -51,6 +107,49 @@ public static Tensor fixed_length_record_reader_eager_fallback(int header_bytes,
         }
         return _result[0];
     }
+    /// <summary>
+    /// A Reader that outputs fixed-length records from a file.
+    /// </summary>
+    /// <param name="header_bytes">
+    /// 
+    /// Number of bytes in the header, defaults to 0.
+    /// 
+    /// </param>
+    /// <param name="record_bytes">
+    /// 
+    /// Number of bytes in the record.
+    /// 
+    /// </param>
+    /// <param name="footer_bytes">
+    /// 
+    /// Number of bytes in the footer, defaults to 0.
+    /// 
+    /// </param>
+    /// <param name="hop_bytes">
+    /// 
+    /// Number of bytes to hop before each read. Default of 0 means using
+    /// record_bytes.
+    /// 
+    /// </param>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this reader is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this reader is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <param name="encoding">
+    /// 
+    /// The type of encoding for the file. Currently ZLIB and GZIP
+    /// are supported. Defaults to none.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor fixed_length_record_reader_v2(int header_bytes = 0, int record_bytes = 0, int footer_bytes = 0, int hop_bytes = 0, string container = "", string shared_name = "", string encoding = "", string? name = null)
     {
         var _ctx = tf.Context;
@@ -58,9 +157,13 @@ public static Tensor fixed_length_record_reader_v2(int header_bytes = 0, int rec
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReaderV2", name, "header_bytes", header_bytes, "record_bytes", record_bytes, "footer_bytes", footer_bytes, "hop_bytes", hop_bytes, "container", container, "shared_name", shared_name, "encoding", encoding));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FixedLengthRecordReaderV2", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["header_bytes"] = header_bytes, ["record_bytes"] = record_bytes, ["footer_bytes"] = footer_bytes, ["hop_bytes"] = hop_bytes, ["container"] = container, ["shared_name"] = shared_name, ["encoding"] = encoding } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -72,8 +175,27 @@ public static Tensor fixed_length_record_reader_v2(int header_bytes = 0, int rec
             {
             }
         }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
+        if (encoding is null)
+        {
+            encoding = "";
+        }
         Dictionary<string, object> keywords = new();
-        keywords["header_bytes"] = header_bytes; keywords["record_bytes"] = record_bytes; keywords["footer_bytes"] = footer_bytes; keywords["hop_bytes"] = hop_bytes; keywords["container"] = container; keywords["shared_name"] = shared_name; keywords["encoding"] = encoding; var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReaderV2", name, keywords);
+        keywords["header_bytes"] = header_bytes;
+        keywords["record_bytes"] = record_bytes;
+        keywords["footer_bytes"] = footer_bytes;
+        keywords["hop_bytes"] = hop_bytes;
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        keywords["encoding"] = encoding;
+        var _op = tf.OpDefLib._apply_op_helper("FixedLengthRecordReaderV2", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -94,6 +216,28 @@ public static Tensor fixed_length_record_reader_v2_eager_fallback(int header_byt
         }
         return _result[0];
     }
+    /// <summary>
+    /// A Reader that outputs the queued work as both the key and value.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// To use, enqueue strings in a Queue.  ReaderRead will take the front
+    /// work string and output (work, work).
+    /// 
+    /// </remarks>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this reader is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this reader is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor identity_reader(string container = "", string shared_name = "", string? name = null)
     {
         var _ctx = tf.Context;
@@ -101,9 +245,13 @@ public static Tensor identity_reader(string container = "", string shared_name =
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReader", name, "container", container, "shared_name", shared_name));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReader", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["container"] = container, ["shared_name"] = shared_name } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -115,8 +263,18 @@ public static Tensor identity_reader(string container = "", string shared_name =
             {
             }
         }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
         Dictionary<string, object> keywords = new();
-        keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("IdentityReader", name, keywords);
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        var _op = tf.OpDefLib._apply_op_helper("IdentityReader", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -137,6 +295,28 @@ public static Tensor identity_reader_eager_fallback(string container, string sha
         }
         return _result[0];
     }
+    /// <summary>
+    /// A Reader that outputs the queued work as both the key and value.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// To use, enqueue strings in a Queue.  ReaderRead will take the front
+    /// work string and output (work, work).
+    /// 
+    /// </remarks>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this reader is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this reader is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor identity_reader_v2(string container = "", string shared_name = "", string? name = null)
     {
         var _ctx = tf.Context;
@@ -144,9 +324,13 @@ public static Tensor identity_reader_v2(string container = "", string shared_nam
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReaderV2", name, "container", container, "shared_name", shared_name));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IdentityReaderV2", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["container"] = container, ["shared_name"] = shared_name } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -158,8 +342,18 @@ public static Tensor identity_reader_v2(string container = "", string shared_nam
             {
             }
         }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
         Dictionary<string, object> keywords = new();
-        keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("IdentityReaderV2", name, keywords);
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        var _op = tf.OpDefLib._apply_op_helper("IdentityReaderV2", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -180,6 +374,18 @@ public static Tensor identity_reader_v2_eager_fallback(string container, string
         }
         return _result[0];
     }
+    /// <summary>
+    /// Returns the set of files matching one or more glob patterns.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Note that this routine only supports wildcard characters in the
+    /// basename portion of the pattern, not in the directory portion.
+    /// Note also that the order of filenames returned is deterministic.
+    /// 
+    /// </remarks>
+    /// <param name="pattern"></param>
+    /// <returns></returns>
     public static Tensor matching_files(Tensor pattern, string? name = null)
     {
         var _ctx = tf.Context;
@@ -187,9 +393,13 @@ public static Tensor matching_files(Tensor pattern, string? name = null)
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatchingFiles", name, pattern));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatchingFiles", name) { args = new object[] { pattern }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -224,51 +434,11 @@ public static Tensor matching_files_eager_fallback(Tensor pattern, string name,
         }
         return _result[0];
     }
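For reference, a minimal eager-mode sketch of the glob behavior described above (the path and pattern are illustrative):

    // Wildcards are only honored in the basename, not in directory components.
    var pattern = tf.constant("/tmp/data/*.csv");            // hypothetical pattern
    var files = gen_io_ops.matching_files(pattern);          // 1-D string tensor, deterministic order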
-    public static Operation merge_v2_checkpoints(Tensor checkpoint_prefixes, Tensor destination_prefix, bool delete_old_dirs = true, bool allow_missing_files = false, string? name = null)
-    {
-        var _ctx = tf.Context;
-        if (_ctx.executing_eagerly())
-        {
-            try
-            {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MergeV2Checkpoints", name, checkpoint_prefixes, destination_prefix, "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files));
-                return null;
-            }
-            catch (Exception)
-            {
-            }
-            try
-            {
-                return merge_v2_checkpoints_eager_fallback(checkpoint_prefixes, destination_prefix, delete_old_dirs: delete_old_dirs, allow_missing_files: allow_missing_files, name: name, ctx: _ctx);
-            }
-            catch (Exception)
-            {
-            }
-        }
-        Dictionary<string, object> keywords = new();
-        keywords["checkpoint_prefixes"] = checkpoint_prefixes;
-        keywords["destination_prefix"] = destination_prefix;
-        keywords["delete_old_dirs"] = delete_old_dirs; keywords["allow_missing_files"] = allow_missing_files; var _op = tf.OpDefLib._apply_op_helper("MergeV2Checkpoints", name, keywords);
-        var _result = _op.outputs;
-        if (_execute.must_record_gradient())
-        {
-            object[] _attrs = new object[] { "delete_old_dirs", _op._get_attr_bool("delete_old_dirs"), "allow_missing_files", _op._get_attr_bool("allow_missing_files") };
-            _execute.record_gradient("MergeV2Checkpoints", _op.inputs, _attrs, _result);
-        }
-        return _op;
-    }
-
-    public static Tensor merge_v2_checkpoints_eager_fallback(Tensor checkpoint_prefixes, Tensor destination_prefix, bool delete_old_dirs, bool allow_missing_files, string name, Context ctx)
-    {
-        Tensor[] _inputs_flat = new Tensor[] { checkpoint_prefixes, destination_prefix };
-        object[] _attrs = new object[] { "delete_old_dirs", delete_old_dirs, "allow_missing_files", allow_missing_files };
-        var _result = _execute.execute("MergeV2Checkpoints", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
-        if (_execute.must_record_gradient())
-        {
-            _execute.record_gradient("MergeV2Checkpoints", _inputs_flat, _attrs, _result);
-        }
-        return null;
-    }
+    /// <summary>
+    /// Reads and outputs the entire contents of the input filename.
+    /// </summary>
+    /// <param name="filename"></param>
+    /// <returns></returns>
     public static Tensor read_file(Tensor filename, string? name = null)
     {
         var _ctx = tf.Context;
@@ -276,9 +446,13 @@ public static Tensor read_file(Tensor filename, string? name = null)
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReadFile", name, filename));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReadFile", name) { args = new object[] { filename }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -313,6 +487,17 @@ public static Tensor read_file_eager_fallback(Tensor filename, string name, Cont
         }
         return _result[0];
     }
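A one-line sketch of the op in eager mode (the filename is illustrative):

    // Returns the entire file as a 0-D string (bytes) tensor.
    var contents = gen_io_ops.read_file(tf.constant("/tmp/example.txt"));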
+    /// <summary>
+    /// Returns the number of records this Reader has produced.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This is the same as the number of ReaderRead executions that have
+    /// succeeded.
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <returns></returns>
     public static Tensor reader_num_records_produced(Tensor reader_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -336,6 +521,17 @@ public static Tensor reader_num_records_produced_eager_fallback(Tensor reader_ha
     {
         throw new RuntimeError($"reader_num_records_produced op does not support eager execution. Arg 'reader_handle' is a ref.");
     }
+    /// <summary>
+    /// Returns the number of records this Reader has produced.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This is the same as the number of ReaderRead executions that have
+    /// succeeded.
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <returns></returns>
     public static Tensor reader_num_records_produced_v2(Tensor reader_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -343,9 +539,13 @@ public static Tensor reader_num_records_produced_v2(Tensor reader_handle, string
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumRecordsProducedV2", name, reader_handle));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumRecordsProducedV2", name) { args = new object[] { reader_handle }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -380,6 +580,11 @@ public static Tensor reader_num_records_produced_v2_eager_fallback(Tensor reader
         }
         return _result[0];
     }
+    /// <summary>
+    /// Returns the number of work units this Reader has finished processing.
+    /// </summary>
+    /// <param name="reader_handle"></param>
+    /// <returns></returns>
     public static Tensor reader_num_work_units_completed(Tensor reader_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -403,6 +608,11 @@ public static Tensor reader_num_work_units_completed_eager_fallback(Tensor reade
     {
         throw new RuntimeError($"reader_num_work_units_completed op does not support eager execution. Arg 'reader_handle' is a ref.");
     }
+    /// <summary>
+    /// Returns the number of work units this Reader has finished processing.
+    /// </summary>
+    /// <param name="reader_handle"></param>
+    /// <returns></returns>
     public static Tensor reader_num_work_units_completed_v2(Tensor reader_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -410,9 +620,13 @@ public static Tensor reader_num_work_units_completed_v2(Tensor reader_handle, st
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumWorkUnitsCompletedV2", name, reader_handle));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderNumWorkUnitsCompletedV2", name) { args = new object[] { reader_handle }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -447,6 +661,19 @@ public static Tensor reader_num_work_units_completed_v2_eager_fallback(Tensor re
         }
         return _result[0];
     }
+    /// <summary>
+    /// Returns the next record (key, value pair) produced by a Reader.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Will dequeue from the input queue if necessary (e.g. when the
+    /// Reader needs to start reading from a new file since it has finished
+    /// with the previous file).
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <param name="queue_handle"></param>
+    /// <returns></returns>
     public static Tensor[] reader_read(Tensor reader_handle, Tensor queue_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -471,6 +698,21 @@ public static Tensor[] reader_read_eager_fallback(Tensor reader_handle, Tensor q
     {
         throw new RuntimeError($"reader_read op does not support eager execution. Arg 'reader_handle' is a ref.");
     }
+    /// <summary>
+    /// Returns up to `num_records` (key, value) pairs produced by a Reader.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Will dequeue from the input queue if necessary (e.g. when the
+    /// Reader needs to start reading from a new file since it has finished
+    /// with the previous file).
+    /// It may return fewer than `num_records` pairs even before the last batch.
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <param name="queue_handle"></param>
+    /// <param name="num_records"></param>
+    /// <returns></returns>
     public static Tensor[] reader_read_up_to(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string? name = null)
     {
         var _ctx = tf.Context;
@@ -496,6 +738,21 @@ public static Tensor[] reader_read_up_to_eager_fallback(Tensor reader_handle, Te
     {
         throw new RuntimeError($"reader_read_up_to op does not support eager execution. Arg 'reader_handle' is a ref.");
     }
+    /// <summary>
+    /// Returns up to `num_records` (key, value) pairs produced by a Reader.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Will dequeue from the input queue if necessary (e.g. when the
+    /// Reader needs to start reading from a new file since it has finished
+    /// with the previous file).
+    /// It may return fewer than `num_records` pairs even before the last batch.
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <param name="queue_handle"></param>
+    /// <param name="num_records"></param>
+    /// <returns></returns>
     public static Tensor[] reader_read_up_to_v2(Tensor reader_handle, Tensor queue_handle, Tensor num_records, string? name = null)
     {
         var _ctx = tf.Context;
@@ -503,9 +760,13 @@ public static Tensor[] reader_read_up_to_v2(Tensor reader_handle, Tensor queue_h
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadUpToV2", name, reader_handle, queue_handle, num_records));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadUpToV2", name) { args = new object[] { reader_handle, queue_handle, num_records }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -542,6 +803,19 @@ public static Tensor[] reader_read_up_to_v2_eager_fallback(Tensor reader_handle,
         }
         return _result;
     }
+    /// <summary>
+    /// Returns the next record (key, value pair) produced by a Reader.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Will dequeue from the input queue if necessary (e.g. when the
+    /// Reader needs to start reading from a new file since it has finished
+    /// with the previous file).
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <param name="queue_handle"></param>
+    /// <returns></returns>
     public static Tensor[] reader_read_v2(Tensor reader_handle, Tensor queue_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -549,9 +823,13 @@ public static Tensor[] reader_read_v2(Tensor reader_handle, Tensor queue_handle,
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadV2", name, reader_handle, queue_handle));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderReadV2", name) { args = new object[] { reader_handle, queue_handle }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -587,6 +865,11 @@ public static Tensor[] reader_read_v2_eager_fallback(Tensor reader_handle, Tenso
         }
         return _result;
     }
+    /// <summary>
+    /// Restore a Reader to its initial clean state.
+    /// </summary>
+    /// <param name="reader_handle"></param>
+    /// <returns></returns>
     public static Operation reader_reset(Tensor reader_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -606,10 +889,15 @@ public static Operation reader_reset(Tensor reader_handle, string? name = null)
         return _op;
     }
 
-    public static Tensor reader_reset_eager_fallback(Tensor reader_handle, string name, Context ctx)
+    public static Operation reader_reset_eager_fallback(Tensor reader_handle, string name, Context ctx)
     {
         throw new RuntimeError($"reader_reset op does not support eager execution. Arg 'reader_handle' is a ref.");
     }
+    /// <summary>
+    /// Restore a Reader to its initial clean state.
+    /// </summary>
+    /// <param name="reader_handle"></param>
+    /// <returns></returns>
     public static Operation reader_reset_v2(Tensor reader_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -617,9 +905,13 @@ public static Operation reader_reset_v2(Tensor reader_handle, string? name = nul
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderResetV2", name, reader_handle));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderResetV2", name) { args = new object[] { reader_handle }, attrs = new Dictionary<string, object>() { } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -643,7 +935,7 @@ public static Operation reader_reset_v2(Tensor reader_handle, string? name = nul
         return _op;
     }
 
-    public static Tensor reader_reset_v2_eager_fallback(Tensor reader_handle, string name, Context ctx)
+    public static Operation reader_reset_v2_eager_fallback(Tensor reader_handle, string name, Context ctx)
     {
         Tensor[] _inputs_flat = new Tensor[] { reader_handle };
         object[] _attrs = new object[] { };
@@ -654,6 +946,18 @@ public static Tensor reader_reset_v2_eager_fallback(Tensor reader_handle, string
         }
         return null;
     }
+    /// <summary>
+    /// Restore a reader to a previously saved state.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Not all Readers support being restored, so this can produce an
+    /// Unimplemented error.
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <param name="state"></param>
+    /// <returns></returns>
     public static Operation reader_restore_state(Tensor reader_handle, Tensor state, string? name = null)
     {
         var _ctx = tf.Context;
@@ -674,10 +978,22 @@ public static Operation reader_restore_state(Tensor reader_handle, Tensor state,
         return _op;
     }
 
-    public static Tensor reader_restore_state_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx)
+    public static Operation reader_restore_state_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx)
     {
         throw new RuntimeError($"reader_restore_state op does not support eager execution. Arg 'reader_handle' is a ref.");
     }
+    /// <summary>
+    /// Restore a reader to a previously saved state.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Not all Readers support being restored, so this can produce an
+    /// Unimplemented error.
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <param name="state"></param>
+    /// <returns></returns>
     public static Operation reader_restore_state_v2(Tensor reader_handle, Tensor state, string? name = null)
     {
         var _ctx = tf.Context;
@@ -685,9 +1001,13 @@ public static Operation reader_restore_state_v2(Tensor reader_handle, Tensor sta
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderRestoreStateV2", name, reader_handle, state));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderRestoreStateV2", name) { args = new object[] { reader_handle, state }, attrs = new Dictionary<string, object>() { } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -712,7 +1032,7 @@ public static Operation reader_restore_state_v2(Tensor reader_handle, Tensor sta
         return _op;
     }
 
-    public static Tensor reader_restore_state_v2_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx)
+    public static Operation reader_restore_state_v2_eager_fallback(Tensor reader_handle, Tensor state, string name, Context ctx)
     {
         Tensor[] _inputs_flat = new Tensor[] { reader_handle, state };
         object[] _attrs = new object[] { };
@@ -723,6 +1043,17 @@ public static Tensor reader_restore_state_v2_eager_fallback(Tensor reader_handle
         }
         return null;
     }
+    /// <summary>
+    /// Produce a string tensor that encodes the state of a Reader.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Not all Readers support being serialized, so this can produce an
+    /// Unimplemented error.
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <returns></returns>
     public static Tensor reader_serialize_state(Tensor reader_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -746,6 +1077,17 @@ public static Tensor reader_serialize_state_eager_fallback(Tensor reader_handle,
     {
         throw new RuntimeError($"reader_serialize_state op does not support eager execution. Arg 'reader_handle' is a ref.");
     }
+    /// <summary>
+    /// Produce a string tensor that encodes the state of a Reader.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Not all Readers support being serialized, so this can produce an
+    /// Unimplemented error.
+    /// 
+    /// </remarks>
+    /// <param name="reader_handle"></param>
+    /// <returns></returns>
     public static Tensor reader_serialize_state_v2(Tensor reader_handle, string? name = null)
     {
         var _ctx = tf.Context;
@@ -753,9 +1095,13 @@ public static Tensor reader_serialize_state_v2(Tensor reader_handle, string? nam
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderSerializeStateV2", name, reader_handle));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReaderSerializeStateV2", name) { args = new object[] { reader_handle }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -790,6 +1136,43 @@ public static Tensor reader_serialize_state_v2_eager_fallback(Tensor reader_hand
         }
         return _result[0];
     }
+    /// <summary>
+    /// Restores a tensor from checkpoint files.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Reads a tensor stored in one or several files. If there are several files (for
+    /// instance because a tensor was saved as slices), `file_pattern` may contain
+    /// wildcard symbols (`*` and `?`) in the filename portion only, not in the
+    /// directory portion.
+    /// 
+    /// If a `file_pattern` matches several files, `preferred_shard` can be used to hint
+    /// in which file the requested tensor is likely to be found. This op will first
+    /// open the file at index `preferred_shard` in the list of matching files and try
+    /// to restore tensors from that file.  Only if some tensors or tensor slices are
+    /// not found in that first file, then the Op opens all the files. Setting
+    /// `preferred_shard` to match the value passed as the `shard` input
+    /// of a matching `Save` Op may speed up Restore.  This attribute only affects
+    /// performance, not correctness.  The default value -1 means files are processed in
+    /// order.
+    /// 
+    /// See also `RestoreSlice`.
+    /// 
+    /// </remarks>
+    /// <param name="file_pattern"></param>
+    /// <param name="tensor_name"></param>
+    /// <param name="dt">
+    /// 
+    /// The type of the tensor to be restored.
+    /// 
+    /// </param>
+    /// <param name="preferred_shard">
+    /// 
+    /// Index of file to open first if multiple files match
+    /// `file_pattern`.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor restore(Tensor file_pattern, Tensor tensor_name, TF_DataType dt, int preferred_shard = -1, string? name = null)
     {
         var _ctx = tf.Context;
@@ -797,9 +1180,13 @@ public static Tensor restore(Tensor file_pattern, Tensor tensor_name, TF_DataTyp
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Restore", name, file_pattern, tensor_name, "dt", dt, "preferred_shard", preferred_shard));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Restore", name) { args = new object[] { file_pattern, tensor_name }, attrs = new Dictionary<string, object>() { ["dt"] = dt, ["preferred_shard"] = preferred_shard } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -814,7 +1201,9 @@ public static Tensor restore(Tensor file_pattern, Tensor tensor_name, TF_DataTyp
         Dictionary<string, object> keywords = new();
         keywords["file_pattern"] = file_pattern;
         keywords["tensor_name"] = tensor_name;
-        keywords["dt"] = dt; keywords["preferred_shard"] = preferred_shard; var _op = tf.OpDefLib._apply_op_helper("Restore", name, keywords);
+        keywords["dt"] = dt;
+        keywords["preferred_shard"] = preferred_shard;
+        var _op = tf.OpDefLib._apply_op_helper("Restore", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -835,6 +1224,34 @@ public static Tensor restore_eager_fallback(Tensor file_pattern, Tensor tensor_n
         }
         return _result[0];
     }
+    /// <summary>
+    /// Restores a tensor from checkpoint files.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This is like `Restore` except that restored tensor can be listed as filling
+    /// only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
+    /// larger tensor and the slice that the restored tensor covers.
+    /// 
+    /// The `shape_and_slice` input has the same format as the
+    /// elements of the `shapes_and_slices` input of the `SaveSlices` op.
+    /// 
+    /// </remarks>
+    /// <param name="file_pattern"></param>
+    /// <param name="tensor_name"></param>
+    /// <param name="shape_and_slice"></param>
+    /// <param name="dt">
+    /// 
+    /// The type of the tensor to be restored.
+    /// 
+    /// </param>
+    /// <param name="preferred_shard">
+    /// 
+    /// Index of file to open first if multiple files match
+    /// `file_pattern`. See the documentation for `Restore`.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor restore_slice(Tensor file_pattern, Tensor tensor_name, Tensor shape_and_slice, TF_DataType dt, int preferred_shard = -1, string? name = null)
     {
         var _ctx = tf.Context;
@@ -842,9 +1259,13 @@ public static Tensor restore_slice(Tensor file_pattern, Tensor tensor_name, Tens
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreSlice", name, file_pattern, tensor_name, shape_and_slice, "dt", dt, "preferred_shard", preferred_shard));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreSlice", name) { args = new object[] { file_pattern, tensor_name, shape_and_slice }, attrs = new Dictionary<string, object>() { ["dt"] = dt, ["preferred_shard"] = preferred_shard } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -860,7 +1281,9 @@ public static Tensor restore_slice(Tensor file_pattern, Tensor tensor_name, Tens
         keywords["file_pattern"] = file_pattern;
         keywords["tensor_name"] = tensor_name;
         keywords["shape_and_slice"] = shape_and_slice;
-        keywords["dt"] = dt; keywords["preferred_shard"] = preferred_shard; var _op = tf.OpDefLib._apply_op_helper("RestoreSlice", name, keywords);
+        keywords["dt"] = dt;
+        keywords["preferred_shard"] = preferred_shard;
+        var _op = tf.OpDefLib._apply_op_helper("RestoreSlice", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -881,15 +1304,49 @@ public static Tensor restore_slice_eager_fallback(Tensor file_pattern, Tensor te
         }
         return _result[0];
     }
-    public static Tensor restore_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string? name = null)
+    /// <summary>
+    /// Restores tensors from a V2 checkpoint.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// For backward compatibility with the V1 format, this Op currently allows
+    /// restoring from a V1 checkpoint as well:
+    ///   - This Op first attempts to find the V2 index file pointed to by "prefix", and
+    ///     if found, proceeds to read it as a V2 checkpoint;
+    ///   - Otherwise the V1 read path is invoked.
+    /// Relying on this behavior is not recommended, as the ability to fall back to read
+    /// V1 might be deprecated and eventually removed.
+    /// 
+    /// By default, restores the named tensors in full.  If the caller wishes to restore
+    /// specific slices of stored tensors, "shape_and_slices" should be non-empty
+    /// strings and correspondingly well-formed.
+    /// 
+    /// Callers must ensure all the named tensors are indeed stored in the checkpoint.
+    /// 
+    /// </remarks>
+    /// <param name="prefix"></param>
+    /// <param name="tensor_names"></param>
+    /// <param name="shape_and_slices"></param>
+    /// <param name="dtypes">
+    /// 
+    /// shape {N}.  The list of expected dtype for the tensors.  Must match
+    /// those stored in the checkpoint.
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Tensor[] restore_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string? name = null)
     {
         var _ctx = tf.Context;
         if (_ctx.executing_eagerly())
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreV2", name, prefix, tensor_names, shape_and_slices, "dtypes", dtypes));
-                return _fast_path_result[0];
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RestoreV2", name) { args = new object[] { prefix, tensor_names, shape_and_slices }, attrs = new Dictionary<string, object>() { ["dtypes"] = dtypes } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
             }
             catch (Exception)
             {
@@ -906,43 +1363,63 @@ public static Tensor restore_v2(Tensor prefix, Tensor tensor_names, Tensor shape
         keywords["prefix"] = prefix;
         keywords["tensor_names"] = tensor_names;
         keywords["shape_and_slices"] = shape_and_slices;
-        keywords["dtypes"] = dtypes; var _op = tf.OpDefLib._apply_op_helper("RestoreV2", name, keywords);
+        keywords["dtypes"] = dtypes;
+        var _op = tf.OpDefLib._apply_op_helper("RestoreV2", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
             object[] _attrs = new object[] { "dtypes", _op.get_attr("dtypes") };
             _execute.record_gradient("RestoreV2", _op.inputs, _attrs, _result);
         }
-        return _result[0];
+        return _result;
     }
 
-    public static Tensor restore_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string name, Context ctx)
+    public static Tensor[] restore_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, TF_DataType[] dtypes, string name, Context ctx)
     {
         Tensor[] _inputs_flat = new Tensor[] { prefix, tensor_names, shape_and_slices };
-        object[] _attrs = new object[] { "dtypes", dtypes };
+        object[] _attrs = new object[] { };
         var _result = _execute.execute("RestoreV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
         if (_execute.must_record_gradient())
         {
             _execute.record_gradient("RestoreV2", _inputs_flat, _attrs, _result);
         }
-        return _result[0];
+        return _result;
     }
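With restore_v2 now returning the full output list, several tensors can be restored in one RestoreV2 op; a hedged sketch (checkpoint prefix and tensor names are illustrative):

    var prefix = tf.constant("/tmp/model.ckpt");              // hypothetical checkpoint prefix
    var names = tf.constant(new[] { "w", "b" });              // tensor names stored in the checkpoint
    var slices = tf.constant(new[] { "", "" });               // empty strings => restore each tensor in full
    var restored = gen_io_ops.restore_v2(prefix, names, slices,
        new[] { TF_DataType.TF_FLOAT, TF_DataType.TF_FLOAT });
    // restored[0] corresponds to "w", restored[1] to "b".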
-    public static Operation save(Tensor filename, Tensor tensor_names, Tensor data, TF_DataType[] T, string? name = null)
+    /// <summary>
+    /// Saves the input tensors to disk.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
+    /// is written to `filename` with name `tensor_names[i]`.
+    /// 
+    /// See also `SaveSlices`.
+    /// 
+    /// </remarks>
+    /// <param name="filename"></param>
+    /// <param name="tensor_names"></param>
+    /// <param name="data"></param>
+    /// <returns></returns>
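+    /// <example>
+    /// A minimal sketch (illustrative only): with tensor_names = ["w", "b"] and
+    /// data = { wTensor, bTensor }, <c>save</c> writes wTensor under the name "w" and
+    /// bTensor under the name "b" into the file named by <c>filename</c>.
+    /// </example>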
+    public static Operation save(Tensor filename, Tensor tensor_names, Tensors data, string? name = null)
     {
         var _ctx = tf.Context;
         if (_ctx.executing_eagerly())
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Save", name, filename, tensor_names, data, "T", T));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Save", name) { args = new object[] { filename, tensor_names, data }, attrs = new Dictionary<string, object>() { } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
             try
             {
-                return save_eager_fallback(filename, tensor_names, data, T: T, name: name, ctx: _ctx);
+                return save_eager_fallback(filename, tensor_names, data, name: name, ctx: _ctx);
             }
             catch (Exception)
             {
@@ -952,7 +1429,7 @@ public static Operation save(Tensor filename, Tensor tensor_names, Tensor data,
         keywords["filename"] = filename;
         keywords["tensor_names"] = tensor_names;
         keywords["data"] = data;
-        keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("Save", name, keywords);
+        var _op = tf.OpDefLib._apply_op_helper("Save", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -962,10 +1439,10 @@ public static Operation save(Tensor filename, Tensor tensor_names, Tensor data,
         return _op;
     }
 
-    public static Tensor save_eager_fallback(Tensor filename, Tensor tensor_names, Tensor data, TF_DataType[] T, string name, Context ctx)
+    public static Operation save_eager_fallback(Tensor filename, Tensor tensor_names, Tensor data, string name, Context ctx)
     {
         Tensor[] _inputs_flat = new Tensor[] { filename, tensor_names, data };
-        object[] _attrs = new object[] { "T", T };
+        object[] _attrs = new object[] { };
         var _result = _execute.execute("Save", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
         if (_execute.must_record_gradient())
         {
@@ -973,22 +1450,59 @@ public static Tensor save_eager_fallback(Tensor filename, Tensor tensor_names, T
         }
         return null;
     }
-    public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, TF_DataType[] T, string? name = null)
+    /// <summary>
+    /// Saves slices of the input tensors to disk.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This is like `Save` except that tensors can be listed in the saved file as being
+    /// a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
+    /// larger tensor and the slice that this tensor covers. `shapes_and_slices` must
+    /// have as many elements as `tensor_names`.
+    /// 
+    /// Elements of the `shapes_and_slices` input must either be:
+    /// 
+    /// *  The empty string, in which case the corresponding tensor is
+    ///    saved normally.
+    /// *  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
+    ///    `dimI` are the dimensions of the larger tensor and `slice-spec`
+    ///    specifies what part is covered by the tensor to save.
+    /// 
+    /// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
+    /// where each `sliceI` is either:
+    /// 
+    /// *  The string `-` meaning that the slice covers all indices of this dimension
+    /// *  `start,length` where `start` and `length` are integers.  In that
+    ///    case the slice covers `length` indices starting at `start`.
+    /// 
+    /// See also `Save`.
+    /// 
+    /// </remarks>
+    /// <param name="filename"></param>
+    /// <param name="tensor_names"></param>
+    /// <param name="shapes_and_slices"></param>
+    /// <param name="data"></param>
+    /// <returns></returns>
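+    /// <example>
+    /// A worked sketch of the <c>shapes_and_slices</c> format (values are illustrative):
+    /// <code>
+    /// // ""            -> the corresponding tensor is saved in full
+    /// // "10 20 -:-"   -> a full [10, 20] tensor (every dimension fully covered)
+    /// // "10 20 0,5:-" -> rows 0..4 of a larger [10, 20] tensor, all 20 columns
+    /// </code>
+    /// </example>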
+    public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensors data, string? name = null)
     {
         var _ctx = tf.Context;
         if (_ctx.executing_eagerly())
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveSlices", name, filename, tensor_names, shapes_and_slices, data, "T", T));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveSlices", name) { args = new object[] { filename, tensor_names, shapes_and_slices, data }, attrs = new Dictionary<string, object>() { } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
             try
             {
-                return save_slices_eager_fallback(filename, tensor_names, shapes_and_slices, data, T: T, name: name, ctx: _ctx);
+                return save_slices_eager_fallback(filename, tensor_names, shapes_and_slices, data, name: name, ctx: _ctx);
             }
             catch (Exception)
             {
@@ -999,7 +1513,7 @@ public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor
         keywords["tensor_names"] = tensor_names;
         keywords["shapes_and_slices"] = shapes_and_slices;
         keywords["data"] = data;
-        keywords["T"] = T; var _op = tf.OpDefLib._apply_op_helper("SaveSlices", name, keywords);
+        var _op = tf.OpDefLib._apply_op_helper("SaveSlices", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -1009,10 +1523,10 @@ public static Operation save_slices(Tensor filename, Tensor tensor_names, Tensor
         return _op;
     }
 
-    public static Tensor save_slices_eager_fallback(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, TF_DataType[] T, string name, Context ctx)
+    public static Operation save_slices_eager_fallback(Tensor filename, Tensor tensor_names, Tensor shapes_and_slices, Tensor data, string name, Context ctx)
     {
         Tensor[] _inputs_flat = new Tensor[] { filename, tensor_names, shapes_and_slices, data };
-        object[] _attrs = new object[] { "T", T };
+        object[] _attrs = new object[] { };
         var _result = _execute.execute("SaveSlices", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
         if (_execute.must_record_gradient())
         {
@@ -1020,22 +1534,41 @@ public static Tensor save_slices_eager_fallback(Tensor filename, Tensor tensor_n
         }
         return null;
     }
-    public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, TF_DataType[] dtypes, string? name = null)
+    /// <summary>
+    /// Saves tensors in V2 checkpoint format.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// By default, saves the named tensors in full.  If the caller wishes to save
+    /// specific slices of full tensors, "shape_and_slices" should be non-empty strings
+    /// and correspondingly well-formed.
+    /// 
+    /// </remarks>
+    /// <param name="prefix"></param>
+    /// <param name="tensor_names"></param>
+    /// <param name="shape_and_slices"></param>
+    /// <param name="tensors"></param>
+    /// <returns></returns>
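+    /// <example>
+    /// A minimal sketch (illustrative only): with tensor_names = ["w", "b"],
+    /// shape_and_slices = ["", ""] and tensors = { wTensor, bTensor }, <c>save_v2</c>
+    /// writes both tensors in full to a V2 checkpoint rooted at <c>prefix</c>; the
+    /// result can later be read back with <c>restore_v2</c>.
+    /// </example>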
+    public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensors tensors, string? name = null)
     {
         var _ctx = tf.Context;
         if (_ctx.executing_eagerly())
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveV2", name, prefix, tensor_names, shape_and_slices, tensors, "dtypes", dtypes));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SaveV2", name) { args = new object[] { prefix, tensor_names, shape_and_slices, tensors }, attrs = new Dictionary<string, object>() { } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
             try
             {
-                return save_v2_eager_fallback(prefix, tensor_names, shape_and_slices, tensors, dtypes: dtypes, name: name, ctx: _ctx);
+                return save_v2_eager_fallback(prefix, tensor_names, shape_and_slices, tensors, name: name, ctx: _ctx);
             }
             catch (Exception)
             {
@@ -1046,7 +1579,7 @@ public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape
         keywords["tensor_names"] = tensor_names;
         keywords["shape_and_slices"] = shape_and_slices;
         keywords["tensors"] = tensors;
-        keywords["dtypes"] = dtypes; var _op = tf.OpDefLib._apply_op_helper("SaveV2", name, keywords);
+        var _op = tf.OpDefLib._apply_op_helper("SaveV2", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -1056,10 +1589,10 @@ public static Operation save_v2(Tensor prefix, Tensor tensor_names, Tensor shape
         return _op;
     }
 
-    public static Tensor save_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, TF_DataType[] dtypes, string name, Context ctx)
+    public static Operation save_v2_eager_fallback(Tensor prefix, Tensor tensor_names, Tensor shape_and_slices, Tensor tensors, string name, Context ctx)
     {
         Tensor[] _inputs_flat = new Tensor[] { prefix, tensor_names, shape_and_slices, tensors };
-        object[] _attrs = new object[] { "dtypes", dtypes };
+        object[] _attrs = new object[] { };
         var _result = _execute.execute("SaveV2", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
         if (_execute.must_record_gradient())
         {
@@ -1067,6 +1600,18 @@ public static Tensor save_v2_eager_fallback(Tensor prefix, Tensor tensor_names,
         }
         return null;
     }
+    /// <summary>
+    /// Generate a sharded filename.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// The filename is printf formatted as %s-%05d-of-%05d from basename, shard, num_shards.
+    /// 
+    /// </remarks>
+    /// <param name="basename"></param>
+    /// <param name="shard"></param>
+    /// <param name="num_shards"></param>
+    /// <returns></returns>
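+    /// <example>
+    /// Worked example of the printf format above (values are illustrative):
+    /// basename "ckpt", shard 3, num_shards 8 produces "ckpt-00003-of-00008".
+    /// </example>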
     public static Tensor sharded_filename(Tensor basename, Tensor shard, Tensor num_shards, string? name = null)
     {
         var _ctx = tf.Context;
@@ -1074,9 +1619,13 @@ public static Tensor sharded_filename(Tensor basename, Tensor shard, Tensor num_
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilename", name, basename, shard, num_shards));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilename", name) { args = new object[] { basename, shard, num_shards }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1113,6 +1662,12 @@ public static Tensor sharded_filename_eager_fallback(Tensor basename, Tensor sha
         }
         return _result[0];
     }
+    /// <summary>
+    /// Generate a glob pattern matching all sharded file names.
+    /// </summary>
+    /// <param name="basename"></param>
+    /// <param name="num_shards"></param>
+    /// <returns></returns>
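+    /// <example>
+    /// Illustrative sketch: for basename "ckpt" and num_shards 8 the returned glob
+    /// pattern is expected to match every shard produced by <c>sharded_filename</c>,
+    /// e.g. "ckpt-00000-of-00008" through "ckpt-00007-of-00008".
+    /// </example>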
     public static Tensor sharded_filespec(Tensor basename, Tensor num_shards, string? name = null)
     {
         var _ctx = tf.Context;
@@ -1120,9 +1675,13 @@ public static Tensor sharded_filespec(Tensor basename, Tensor num_shards, string
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilespec", name, basename, num_shards));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ShardedFilespec", name) { args = new object[] { basename, num_shards }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1158,6 +1717,27 @@ public static Tensor sharded_filespec_eager_fallback(Tensor basename, Tensor num
         }
         return _result[0];
     }
+    /// <summary>
+    /// A Reader that outputs the lines of a file delimited by '\n'.
+    /// </summary>
+    /// <param name="skip_header_lines">
+    /// 
+    /// Number of lines to skip from the beginning of every file.
+    /// 
+    /// </param>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this reader is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this reader is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor text_line_reader(int skip_header_lines = 0, string container = "", string shared_name = "", string? name = null)
     {
         var _ctx = tf.Context;
@@ -1165,9 +1745,13 @@ public static Tensor text_line_reader(int skip_header_lines = 0, string containe
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReader", name, "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReader", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["skip_header_lines"] = skip_header_lines, ["container"] = container, ["shared_name"] = shared_name } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1179,8 +1763,19 @@ public static Tensor text_line_reader(int skip_header_lines = 0, string containe
             {
             }
         }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
         Dictionary<string, object> keywords = new();
-        keywords["skip_header_lines"] = skip_header_lines; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("TextLineReader", name, keywords);
+        keywords["skip_header_lines"] = skip_header_lines;
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        var _op = tf.OpDefLib._apply_op_helper("TextLineReader", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -1201,6 +1796,27 @@ public static Tensor text_line_reader_eager_fallback(int skip_header_lines, stri
         }
         return _result[0];
     }
+    /// <summary>
+    /// A Reader that outputs the lines of a file delimited by '\n'.
+    /// </summary>
+    /// <param name="skip_header_lines">
+    /// 
+    /// Number of lines to skip from the beginning of every file.
+    /// 
+    /// </param>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this reader is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this reader is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor text_line_reader_v2(int skip_header_lines = 0, string container = "", string shared_name = "", string? name = null)
     {
         var _ctx = tf.Context;
@@ -1208,9 +1824,13 @@ public static Tensor text_line_reader_v2(int skip_header_lines = 0, string conta
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReaderV2", name, "skip_header_lines", skip_header_lines, "container", container, "shared_name", shared_name));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TextLineReaderV2", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["skip_header_lines"] = skip_header_lines, ["container"] = container, ["shared_name"] = shared_name } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1222,8 +1842,19 @@ public static Tensor text_line_reader_v2(int skip_header_lines = 0, string conta
             {
             }
         }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
         Dictionary<string, object> keywords = new();
-        keywords["skip_header_lines"] = skip_header_lines; keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("TextLineReaderV2", name, keywords);
+        keywords["skip_header_lines"] = skip_header_lines;
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        var _op = tf.OpDefLib._apply_op_helper("TextLineReaderV2", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -1244,6 +1875,28 @@ public static Tensor text_line_reader_v2_eager_fallback(int skip_header_lines, s
         }
         return _result[0];
     }
+    /// <summary>
+    /// A Reader that outputs the entire contents of a file as a value.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// To use, enqueue filenames in a Queue.  The output of ReaderRead will
+    /// be a filename (key) and the contents of that file (value).
+    /// 
+    /// </remarks>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this reader is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this reader is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <returns></returns>
     public static Tensor whole_file_reader(string container = "", string shared_name = "", string? name = null)
     {
         var _ctx = tf.Context;
@@ -1251,9 +1904,13 @@ public static Tensor whole_file_reader(string container = "", string shared_name
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReader", name, "container", container, "shared_name", shared_name));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReader", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["container"] = container, ["shared_name"] = shared_name } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1265,8 +1922,18 @@ public static Tensor whole_file_reader(string container = "", string shared_name
             {
             }
         }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
         Dictionary<string, object> keywords = new();
-        keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("WholeFileReader", name, keywords);
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        var _op = tf.OpDefLib._apply_op_helper("WholeFileReader", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -1287,6 +1954,28 @@ public static Tensor whole_file_reader_eager_fallback(string container, string s
         }
         return _result[0];
     }
+    /// <summary>
+    /// A Reader that outputs the entire contents of a file as a value.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// To use, enqueue filenames in a Queue.  The output of ReaderRead will
+    /// be a filename (key) and the contents of that file (value).
+    /// 
+    /// </remarks>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this reader is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this reader is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <returns></returns>
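+    /// <example>
+    /// A minimal sketch of the queue-based protocol described above (file names are
+    /// illustrative): enqueue "a.txt" and "b.txt" on a filename queue, then each
+    /// ReaderRead on this reader yields one (key, value) pair such as
+    /// ("a.txt", &lt;contents of a.txt&gt;).
+    /// </example>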
     public static Tensor whole_file_reader_v2(string container = "", string shared_name = "", string? name = null)
     {
         var _ctx = tf.Context;
@@ -1294,9 +1983,13 @@ public static Tensor whole_file_reader_v2(string container = "", string shared_n
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReaderV2", name, "container", container, "shared_name", shared_name));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WholeFileReaderV2", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["container"] = container, ["shared_name"] = shared_name } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1308,8 +2001,18 @@ public static Tensor whole_file_reader_v2(string container = "", string shared_n
             {
             }
         }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
         Dictionary<string, object> keywords = new();
-        keywords["container"] = container; keywords["shared_name"] = shared_name; var _op = tf.OpDefLib._apply_op_helper("WholeFileReaderV2", name, keywords);
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        var _op = tf.OpDefLib._apply_op_helper("WholeFileReaderV2", name, keywords);
         var _result = _op.outputs;
         if (_execute.must_record_gradient())
         {
@@ -1330,6 +2033,17 @@ public static Tensor whole_file_reader_v2_eager_fallback(string container, strin
         }
         return _result[0];
     }
+    /// <summary>
+    /// Writes `contents` to the file at input `filename`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Creates the file and recursively creates the directory if it does not exist.
+    /// 
+    /// </remarks>
+    /// <param name="filename"></param>
+    /// <param name="contents"></param>
+    /// <returns></returns>
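+    /// <example>
+    /// A minimal sketch (path and contents are illustrative):
+    /// <code>
+    /// var op = write_file(tf.constant("logs/run1/result.txt"), tf.constant("done"));
+    /// // creates logs/run1/ if needed and writes "done" into result.txt
+    /// </code>
+    /// </example>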
     public static Operation write_file(Tensor filename, Tensor contents, string? name = null)
     {
         var _ctx = tf.Context;
@@ -1337,9 +2051,13 @@ public static Operation write_file(Tensor filename, Tensor contents, string? nam
         {
             try
             {
-                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WriteFile", name, filename, contents));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "WriteFile", name) { args = new object[] { filename, contents }, attrs = new Dictionary<string, object>() { } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1364,7 +2082,7 @@ public static Operation write_file(Tensor filename, Tensor contents, string? nam
         return _op;
     }
 
-    public static Tensor write_file_eager_fallback(Tensor filename, Tensor contents, string name, Context ctx)
+    public static Operation write_file_eager_fallback(Tensor filename, Tensor contents, string name, Context ctx)
     {
         Tensor[] _inputs_flat = new Tensor[] { filename, contents };
         object[] _attrs = new object[] { };
diff --git a/src/TensorFlowNET.Core/Operations/gen_list_ops.cs b/src/TensorFlowNET.Core/Operations/gen_list_ops.cs
new file mode 100644
index 000000000..59c783b24
--- /dev/null
+++ b/src/TensorFlowNET.Core/Operations/gen_list_ops.cs
@@ -0,0 +1,1308 @@
+/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/
+
+using Tensorflow.Eager;
+using Tensorflow.Contexts;
+using Tensorflow.Exceptions;
+using static Tensorflow.Binding;
+
+namespace Tensorflow;
+
+public static class gen_list_ops
+{
+    /// <summary>
+    /// Creates and returns an empty tensor list.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// All list elements must be tensors of dtype element_dtype and shape compatible
+    /// with element_shape.
+    /// 
+    /// handle: an empty tensor list.
+    /// element_dtype: the type of elements in the list.
+    /// element_shape: a shape compatible with that of elements in the list.
+    /// 
+    /// </remarks>
+    /// <param name="element_shape"></param>
+    /// <param name="max_num_elements"></param>
+    /// <param name="element_dtype"></param>
+    /// <returns></returns>
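+    /// <example>
+    /// A minimal sketch (illustrative values): element_shape = [2], max_num_elements = -1
+    /// (unbounded) and element_dtype = TF_FLOAT yield an empty list whose future elements
+    /// must be float tensors compatible with shape [2].
+    /// </example>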
+    public static Tensor empty_tensor_list(Tensor element_shape, Tensor max_num_elements, TF_DataType element_dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EmptyTensorList", name) { args = new object[] { element_shape, max_num_elements }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return empty_tensor_list_eager_fallback(element_shape, max_num_elements, element_dtype: element_dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["element_shape"] = element_shape;
+        keywords["max_num_elements"] = max_num_elements;
+        keywords["element_dtype"] = element_dtype;
+        var _op = tf.OpDefLib._apply_op_helper("EmptyTensorList", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") };
+            _execute.record_gradient("EmptyTensorList", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor empty_tensor_list_eager_fallback(Tensor element_shape, Tensor max_num_elements, TF_DataType element_dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { element_shape, max_num_elements };
+        object[] _attrs = new object[] { "element_dtype", element_dtype, "shape_type", element_shape.dtype };
+        var _result = _execute.execute("EmptyTensorList", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("EmptyTensorList", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Concatenates all tensors in the list along the 0th dimension.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Requires that all tensors have the same shape except the first dimension.
+    /// 
+    /// input_handle: The input list.
+    /// tensor: The concatenated result.
+    /// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
+    /// 
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="element_dtype"></param>
+    /// <param name="element_shape"></param>
+    /// <returns></returns>
+    public static Tensor[] tensor_list_concat(Tensor input_handle, TF_DataType element_dtype, Shape element_shape = null, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcat", name) { args = new object[] { input_handle }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype, ["element_shape"] = element_shape } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_concat_eager_fallback(input_handle, element_dtype: element_dtype, element_shape: element_shape, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["element_dtype"] = element_dtype;
+        keywords["element_shape"] = element_shape;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListConcat", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "element_shape", _op.get_attr("element_shape") };
+            _execute.record_gradient("TensorListConcat", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
+
+    public static Tensor[] tensor_list_concat_eager_fallback(Tensor input_handle, TF_DataType element_dtype, Shape element_shape, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle };
+        object[] _attrs = new object[] { "element_dtype", element_dtype, "element_shape", element_shape };
+        var _result = _execute.execute("TensorListConcat", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListConcat", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// 
+    /// </summary>
+    /// <param name="input_a"></param>
+    /// <param name="input_b"></param>
+    /// <param name="element_dtype"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_concat_lists(Tensor input_a, Tensor input_b, TF_DataType element_dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcatLists", name) { args = new object[] { input_a, input_b }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_concat_lists_eager_fallback(input_a, input_b, element_dtype: element_dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_a"] = input_a;
+        keywords["input_b"] = input_b;
+        keywords["element_dtype"] = element_dtype;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListConcatLists", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") };
+            _execute.record_gradient("TensorListConcatLists", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_concat_lists_eager_fallback(Tensor input_a, Tensor input_b, TF_DataType element_dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_a, input_b };
+        object[] _attrs = new object[] { "element_dtype", element_dtype };
+        var _result = _execute.execute("TensorListConcatLists", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListConcatLists", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Concatenates all tensors in the list along the 0th dimension.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Requires that all tensors have the same shape except the first dimension.
+    /// 
+    /// input_handle: The input list.
+    /// element_shape: The shape of the uninitialized elements in the list. If the first
+    ///   dimension is not -1, it is assumed that all list elements have the same
+    ///   leading dim.
+    /// leading_dims: The list of leading dims of uninitialized list elements. Used if
+    ///   the leading dim of input_handle.element_shape or the element_shape input arg
+    ///   is not already set.
+    /// tensor: The concatenated result.
+    /// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
+    /// 
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="element_shape"></param>
+    /// <param name="leading_dims"></param>
+    /// <param name="element_dtype"></param>
+    /// <returns></returns>
+    public static Tensor[] tensor_list_concat_v2(Tensor input_handle, Tensor element_shape, Tensor leading_dims, TF_DataType element_dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListConcatV2", name) { args = new object[] { input_handle, element_shape, leading_dims }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_concat_v2_eager_fallback(input_handle, element_shape, leading_dims, element_dtype: element_dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["element_shape"] = element_shape;
+        keywords["leading_dims"] = leading_dims;
+        keywords["element_dtype"] = element_dtype;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListConcatV2", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") };
+            _execute.record_gradient("TensorListConcatV2", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
+
+    public static Tensor[] tensor_list_concat_v2_eager_fallback(Tensor input_handle, Tensor element_shape, Tensor leading_dims, TF_DataType element_dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, element_shape, leading_dims };
+        object[] _attrs = new object[] { "element_dtype", element_dtype, "shape_type", element_shape.dtype };
+        var _result = _execute.execute("TensorListConcatV2", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListConcatV2", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// The shape of the elements of the given list, as a tensor.
+    /// </summary>
+    /// <remarks>
+    /// 
+    ///   input_handle: the list
+    ///   element_shape: the shape of elements of the list
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="shape_type"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_element_shape(Tensor input_handle, TF_DataType shape_type, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListElementShape", name) { args = new object[] { input_handle }, attrs = new Dictionary<string, object>() { ["shape_type"] = shape_type } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_element_shape_eager_fallback(input_handle, shape_type: shape_type, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["shape_type"] = shape_type;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListElementShape", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "shape_type", _op._get_attr_type("shape_type") };
+            _execute.record_gradient("TensorListElementShape", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_element_shape_eager_fallback(Tensor input_handle, TF_DataType shape_type, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle };
+        object[] _attrs = new object[] { "shape_type", shape_type };
+        var _result = _execute.execute("TensorListElementShape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListElementShape", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Creates a TensorList which, when stacked, has the value of `tensor`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Each tensor in the result list corresponds to one row of the input tensor.
+    /// 
+    /// tensor: The input tensor.
+    /// output_handle: The list.
+    /// 
+    /// </remarks>
+    /// <param name="tensor"></param>
+    /// <param name="element_shape"></param>
+    /// <returns></returns>
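+    /// <example>
+    /// Illustrative shapes: a tensor of shape [3, 2] with element_shape = [2] becomes a
+    /// list of 3 elements, each of shape [2] (one per row of the input tensor).
+    /// </example>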
+    public static Tensor tensor_list_from_tensor(Tensor tensor, Tensor element_shape, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListFromTensor", name) { args = new object[] { tensor, element_shape }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_from_tensor_eager_fallback(tensor, element_shape, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["tensor"] = tensor;
+        keywords["element_shape"] = element_shape;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListFromTensor", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") };
+            _execute.record_gradient("TensorListFromTensor", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_from_tensor_eager_fallback(Tensor tensor, Tensor element_shape, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { tensor, element_shape };
+        object[] _attrs = new object[] { "element_dtype", tensor.dtype, "shape_type", element_shape.dtype };
+        var _result = _execute.execute("TensorListFromTensor", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListFromTensor", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Creates a Tensor by indexing into the TensorList.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Each row in the produced Tensor corresponds to the element in the TensorList
+    /// specified by the given index (see `tf.gather`).
+    /// 
+    /// input_handle: The input tensor list.
+    /// indices: The indices used to index into the list.
+    /// values: The tensor.
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="indices"></param>
+    /// <param name="element_shape"></param>
+    /// <param name="element_dtype"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_gather(Tensor input_handle, Tensor indices, Tensor element_shape, TF_DataType element_dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListGather", name) { args = new object[] { input_handle, indices, element_shape }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_gather_eager_fallback(input_handle, indices, element_shape, element_dtype: element_dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["indices"] = indices;
+        keywords["element_shape"] = element_shape;
+        keywords["element_dtype"] = element_dtype;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListGather", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") };
+            _execute.record_gradient("TensorListGather", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_gather_eager_fallback(Tensor input_handle, Tensor indices, Tensor element_shape, TF_DataType element_dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, indices, element_shape };
+        object[] _attrs = new object[] { "element_dtype", element_dtype };
+        var _result = _execute.execute("TensorListGather", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListGather", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// 
+    /// </summary>
+    /// <param name="input_handle"></param>
+    /// <param name="index"></param>
+    /// <param name="element_shape"></param>
+    /// <param name="element_dtype"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_get_item(Tensor input_handle, Tensor index, Tensor element_shape, TF_DataType element_dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListGetItem", name) { args = new object[] { input_handle, index, element_shape }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_get_item_eager_fallback(input_handle, index, element_shape, element_dtype: element_dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["index"] = index;
+        keywords["element_shape"] = element_shape;
+        keywords["element_dtype"] = element_dtype;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListGetItem", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") };
+            _execute.record_gradient("TensorListGetItem", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_get_item_eager_fallback(Tensor input_handle, Tensor index, Tensor element_shape, TF_DataType element_dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, index, element_shape };
+        object[] _attrs = new object[] { "element_dtype", element_dtype };
+        var _result = _execute.execute("TensorListGetItem", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListGetItem", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Returns the number of tensors in the input tensor list.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// input_handle: the input list
+    /// length: the number of tensors in the list
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_length(Tensor input_handle, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListLength", name) { args = new object[] { input_handle }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_length_eager_fallback(input_handle, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListLength", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { };
+            _execute.record_gradient("TensorListLength", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_length_eager_fallback(Tensor input_handle, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle };
+        object[] _attrs = new object[] { };
+        var _result = _execute.execute("TensorListLength", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListLength", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Returns the last element of the input list as well as a list with all but that element.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Fails if the list is empty.
+    /// 
+    /// input_handle: the input list
+    /// tensor: the withdrawn last element of the list
+    /// element_dtype: the type of elements in the list
+    /// element_shape: the shape of the output tensor
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="element_shape"></param>
+    /// <param name="element_dtype"></param>
+    /// <returns></returns>
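+    /// <example>
+    /// Illustrative sketch: for a list holding [t0, t1, t2], the op returns the remaining
+    /// list [t0, t1] together with the withdrawn element t2, and fails if the list is empty.
+    /// </example>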
+    public static Tensor[] tensor_list_pop_back(Tensor input_handle, Tensor element_shape, TF_DataType element_dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPopBack", name) { args = new object[] { input_handle, element_shape }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype } });
+                return _fast_path_result;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_pop_back_eager_fallback(input_handle, element_shape, element_dtype: element_dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["element_shape"] = element_shape;
+        keywords["element_dtype"] = element_dtype;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListPopBack", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") };
+            _execute.record_gradient("TensorListPopBack", _op.inputs, _attrs, _result);
+        }
+        return _result;
+    }
+
+    public static Tensor[] tensor_list_pop_back_eager_fallback(Tensor input_handle, Tensor element_shape, TF_DataType element_dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, element_shape };
+        object[] _attrs = new object[] { "element_dtype", element_dtype };
+        var _result = _execute.execute("TensorListPopBack", 2, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListPopBack", _inputs_flat, _attrs, _result);
+        }
+        return _result;
+    }
+    /// <summary>
+    /// Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// tensor: The tensor to put on the list.
+    /// input_handle: The old list.
+    /// output_handle: A list with the elements of the old list followed by tensor.
+    /// element_dtype: the type of elements in the list.
+    /// element_shape: a shape compatible with that of elements in the list.
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="tensor"></param>
+    /// <returns></returns>
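+    /// <example>
+    /// Illustrative sketch: pushing t2 onto a list holding [t0, t1] returns a new list
+    /// handle holding [t0, t1, t2]; the input list handle itself is left as it was.
+    /// </example>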
+    public static Tensor tensor_list_push_back(Tensor input_handle, Tensor tensor, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPushBack", name) { args = new object[] { input_handle, tensor }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_push_back_eager_fallback(input_handle, tensor, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["tensor"] = tensor;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListPushBack", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") };
+            _execute.record_gradient("TensorListPushBack", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_push_back_eager_fallback(Tensor input_handle, Tensor tensor, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, tensor };
+        object[] _attrs = new object[] { "element_dtype", tensor.dtype };
+        var _result = _execute.execute("TensorListPushBack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListPushBack", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
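+
+    // Usage sketch (illustrative, not generated code): push a vector onto a list and pop it back off.
+    // Assumes an empty_tensor_list wrapper generated earlier in this file and tf.constant /
+    // ops.convert_to_tensor from the core bindings.
+    //
+    //   var handle = empty_tensor_list(ops.convert_to_tensor(new[] { 2 }),  // each element is a length-2 vector
+    //                                  tf.constant(-1),                     // no bound on list size
+    //                                  TF_DataType.TF_FLOAT);
+    //   handle = tensor_list_push_back(handle, tf.constant(new[] { 1.0f, 2.0f }));
+    //   var popped = tensor_list_pop_back(handle, ops.convert_to_tensor(new[] { 2 }), TF_DataType.TF_FLOAT);
+    //   // popped[0] is the shortened list handle, popped[1] is the removed [1, 2] element.
+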
+    /// <summary>
+    /// Appends a tensor to each TensorList in a batch of lists; `tensor[i]` is pushed onto the list held in `input_handles[i]`.
+    /// </summary>
+    /// <param name="input_handles"></param>
+    /// <param name="tensor"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_push_back_batch(Tensor input_handles, Tensor tensor, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListPushBackBatch", name) { args = new object[] { input_handles, tensor }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_push_back_batch_eager_fallback(input_handles, tensor, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handles"] = input_handles;
+        keywords["tensor"] = tensor;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListPushBackBatch", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") };
+            _execute.record_gradient("TensorListPushBackBatch", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_push_back_batch_eager_fallback(Tensor input_handles, Tensor tensor, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handles, tensor };
+        object[] _attrs = new object[] { "element_dtype", tensor.dtype };
+        var _result = _execute.execute("TensorListPushBackBatch", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListPushBackBatch", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
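+
+    // Usage sketch (illustrative, not generated code): input_handles holds a batch of lists and row i of
+    // the tensor is pushed onto list i. Assumes batch_handles was produced by another TensorList op.
+    //
+    //   batch_handles = tensor_list_push_back_batch(batch_handles, tf.constant(new float[,] { { 1f }, { 2f } }));
+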
+    /// <summary>
+    /// List of the given size with empty elements.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// element_shape: the shape of the future elements of the list
+    /// num_elements: the number of elements to reserve
+    /// handle: the output list
+    /// element_dtype: the desired type of elements in the list.
+    /// 
+    /// </remarks>
+    /// <param name="element_shape"></param>
+    /// <param name="num_elements"></param>
+    /// <param name="element_dtype"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_reserve(Tensor element_shape, Tensor num_elements, TF_DataType element_dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListReserve", name) { args = new object[] { element_shape, num_elements }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_reserve_eager_fallback(element_shape, num_elements, element_dtype: element_dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["element_shape"] = element_shape;
+        keywords["num_elements"] = num_elements;
+        keywords["element_dtype"] = element_dtype;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListReserve", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") };
+            _execute.record_gradient("TensorListReserve", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_reserve_eager_fallback(Tensor element_shape, Tensor num_elements, TF_DataType element_dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { element_shape, num_elements };
+        object[] _attrs = new object[] { "element_dtype", element_dtype, "shape_type", element_shape.dtype };
+        var _result = _execute.execute("TensorListReserve", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListReserve", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
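+
+    // Usage sketch (illustrative, not generated code): pre-size a list of three length-2 float vectors,
+    // to be filled in later with tensor_list_set_item.
+    //
+    //   var handle = tensor_list_reserve(ops.convert_to_tensor(new[] { 2 }),  // element_shape
+    //                                    tf.constant(3),                      // num_elements
+    //                                    TF_DataType.TF_FLOAT);
+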
+    /// <summary>
+    /// Resizes the list.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// input_handle: the input list
+    /// size: size of the output list
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="size"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_resize(Tensor input_handle, Tensor size, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListResize", name) { args = new object[] { input_handle, size }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_resize_eager_fallback(input_handle, size, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["size"] = size;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListResize", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { };
+            _execute.record_gradient("TensorListResize", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_resize_eager_fallback(Tensor input_handle, Tensor size, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, size };
+        object[] _attrs = new object[] { };
+        var _result = _execute.execute("TensorListResize", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListResize", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
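+
+    // Usage sketch (illustrative, not generated code): grow (or shrink) the reserved list above to five slots.
+    //
+    //   handle = tensor_list_resize(handle, tf.constant(5));
+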
+    /// <summary>
+    /// Creates a TensorList by indexing into a Tensor.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Each member of the TensorList corresponds to one row of the input tensor,
+    /// specified by the given index (see `tf.gather`).
+    /// 
+    /// tensor: The input tensor.
+    /// indices: The indices used to index into the list.
+    /// element_shape: The shape of the elements in the list (can be less specified than
+    ///   the shape of the tensor).
+    /// output_handle: The TensorList.
+    /// 
+    /// </remarks>
+    /// <param name="tensor"></param>
+    /// <param name="indices"></param>
+    /// <param name="element_shape"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_scatter(Tensor tensor, Tensor indices, Tensor element_shape, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatter", name) { args = new object[] { tensor, indices, element_shape }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_scatter_eager_fallback(tensor, indices, element_shape, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["tensor"] = tensor;
+        keywords["indices"] = indices;
+        keywords["element_shape"] = element_shape;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListScatter", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") };
+            _execute.record_gradient("TensorListScatter", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_scatter_eager_fallback(Tensor tensor, Tensor indices, Tensor element_shape, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { tensor, indices, element_shape };
+        object[] _attrs = new object[] { "element_dtype", tensor.dtype, "shape_type", element_shape.dtype };
+        var _result = _execute.execute("TensorListScatter", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListScatter", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
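+
+    // Usage sketch (illustrative, not generated code): build a list from the rows of a [3, 2] tensor;
+    // row i is stored at list position indices[i].
+    //
+    //   var rows = tf.constant(new float[,] { { 1, 2 }, { 3, 4 }, { 5, 6 } });
+    //   var handle = tensor_list_scatter(rows,
+    //                                    tf.constant(new[] { 0, 2, 1 }),        // indices
+    //                                    ops.convert_to_tensor(new[] { 2 }));   // element_shape
+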
+    /// <summary>
+    /// Scatters tensor at indices in an input list.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Each member of the TensorList corresponds to one row of the input tensor,
+    /// specified by the given index (see `tf.gather`).
+    /// 
+    /// input_handle: The list to scatter into.
+    /// tensor: The input tensor.
+    /// indices: The indices used to index into the list.
+    /// output_handle: The TensorList.
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="tensor"></param>
+    /// <param name="indices"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_scatter_into_existing_list(Tensor input_handle, Tensor tensor, Tensor indices, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatterIntoExistingList", name) { args = new object[] { input_handle, tensor, indices }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_scatter_into_existing_list_eager_fallback(input_handle, tensor, indices, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["tensor"] = tensor;
+        keywords["indices"] = indices;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListScatterIntoExistingList", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") };
+            _execute.record_gradient("TensorListScatterIntoExistingList", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_scatter_into_existing_list_eager_fallback(Tensor input_handle, Tensor tensor, Tensor indices, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, tensor, indices };
+        object[] _attrs = new object[] { "element_dtype", tensor.dtype };
+        var _result = _execute.execute("TensorListScatterIntoExistingList", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListScatterIntoExistingList", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
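+
+    // Usage sketch (illustrative, not generated code): overwrite elements 0 and 1 of an existing list in one call.
+    //
+    //   handle = tensor_list_scatter_into_existing_list(handle,
+    //       tf.constant(new float[,] { { 9, 9 }, { 8, 8 } }),   // two replacement rows
+    //       tf.constant(new[] { 0, 1 }));                       // target positions
+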
+    /// <summary>
+    /// Creates a TensorList by indexing into a Tensor.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Each member of the TensorList corresponds to one row of the input tensor,
+    /// specified by the given index (see `tf.gather`).
+    /// 
+    /// tensor: The input tensor.
+    /// indices: The indices used to index into the list.
+    /// element_shape: The shape of the elements in the list (can be less specified than
+    ///   the shape of the tensor).
+    /// num_elements: The size of the output list. Must be large enough to accommodate
+    ///   the largest index in indices. If -1, the list is just large enough to include
+    ///   the largest index in indices.
+    /// output_handle: The TensorList.
+    /// 
+    /// </remarks>
+    /// <param name="tensor"></param>
+    /// <param name="indices"></param>
+    /// <param name="element_shape"></param>
+    /// <param name="num_elements"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_scatter_v2(Tensor tensor, Tensor indices, Tensor element_shape, Tensor num_elements, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListScatterV2", name) { args = new object[] { tensor, indices, element_shape, num_elements }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_scatter_v2_eager_fallback(tensor, indices, element_shape, num_elements, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["tensor"] = tensor;
+        keywords["indices"] = indices;
+        keywords["element_shape"] = element_shape;
+        keywords["num_elements"] = num_elements;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListScatterV2", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") };
+            _execute.record_gradient("TensorListScatterV2", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_scatter_v2_eager_fallback(Tensor tensor, Tensor indices, Tensor element_shape, Tensor num_elements, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { tensor, indices, element_shape, num_elements };
+        object[] _attrs = new object[] { "element_dtype", tensor.dtype, "shape_type", element_shape.dtype };
+        var _result = _execute.execute("TensorListScatterV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListScatterV2", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
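+
+    // Usage sketch (illustrative, not generated code): same as tensor_list_scatter above, but the output
+    // list size is stated explicitly; -1 sizes it from the largest index. Reuses rows from the earlier sketch.
+    //
+    //   var handle = tensor_list_scatter_v2(rows,
+    //                                       tf.constant(new[] { 0, 2, 1 }),
+    //                                       ops.convert_to_tensor(new[] { 2 }),
+    //                                       tf.constant(-1));
+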
+    /// <summary>
+    /// Sets the index-th position of the list to contain the given tensor.
+    /// </summary>
+    /// <param name="input_handle"></param>
+    /// <param name="index"></param>
+    /// <param name="item"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_set_item(Tensor input_handle, Tensor index, Tensor item, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListSetItem", name) { args = new object[] { input_handle, index, item }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_set_item_eager_fallback(input_handle, index, item, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["index"] = index;
+        keywords["item"] = item;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListSetItem", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype") };
+            _execute.record_gradient("TensorListSetItem", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_set_item_eager_fallback(Tensor input_handle, Tensor index, Tensor item, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, index, item };
+        object[] _attrs = new object[] { "element_dtype", item.dtype };
+        var _result = _execute.execute("TensorListSetItem", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListSetItem", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
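+
+    // Usage sketch (illustrative, not generated code): store a length-2 vector at position 1 of the
+    // reserved list from the earlier sketch.
+    //
+    //   handle = tensor_list_set_item(handle, tf.constant(1), tf.constant(new[] { 2.0f, 3.0f }));
+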
+    /// <summary>
+    /// Splits a tensor into a list.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// list[i] corresponds to lengths[i] tensors from the input tensor.
+    /// The tensor must have rank at least 1 and contain exactly sum(lengths) elements.
+    /// 
+    /// tensor: The input tensor.
+    /// element_shape: A shape compatible with that of elements in the tensor.
+    /// lengths: Vector of sizes of the 0th dimension of tensors in the list.
+    /// output_handle: The list.
+    /// 
+    /// </remarks>
+    /// <param name="tensor"></param>
+    /// <param name="element_shape"></param>
+    /// <param name="lengths"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_split(Tensor tensor, Tensor element_shape, Tensor lengths, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListSplit", name) { args = new object[] { tensor, element_shape, lengths }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_split_eager_fallback(tensor, element_shape, lengths, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["tensor"] = tensor;
+        keywords["element_shape"] = element_shape;
+        keywords["lengths"] = lengths;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListSplit", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "shape_type", _op._get_attr_type("shape_type") };
+            _execute.record_gradient("TensorListSplit", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_split_eager_fallback(Tensor tensor, Tensor element_shape, Tensor lengths, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { tensor, element_shape, lengths };
+        object[] _attrs = new object[] { "element_dtype", tensor.dtype, "shape_type", element_shape.dtype };
+        var _result = _execute.execute("TensorListSplit", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListSplit", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
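+
+    // Usage sketch (illustrative, not generated code): split a length-5 vector into list elements of
+    // length 2 and 3. TensorListSplit expects int64 lengths.
+    //
+    //   var handle = tensor_list_split(tf.constant(new[] { 1f, 2f, 3f, 4f, 5f }),
+    //                                  ops.convert_to_tensor(new[] { -1 }),    // leading dim unknown
+    //                                  tf.constant(new long[] { 2, 3 }));      // lengths
+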
+    /// <summary>
+    /// Stacks all tensors in the list.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Requires that all tensors have the same shape.
+    /// 
+    /// input_handle: the input list
+    /// tensor: the gathered result
+    /// num_elements: optional. If not -1, the number of elements in the list.
+    /// 
+    /// </remarks>
+    /// <param name="input_handle"></param>
+    /// <param name="element_shape"></param>
+    /// <param name="element_dtype"></param>
+    /// <param name="num_elements"></param>
+    /// <returns></returns>
+    public static Tensor tensor_list_stack(Tensor input_handle, Tensor element_shape, TF_DataType element_dtype, int num_elements = -1, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TensorListStack", name) { args = new object[] { input_handle, element_shape }, attrs = new Dictionary<string, object>() { ["element_dtype"] = element_dtype, ["num_elements"] = num_elements } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return tensor_list_stack_eager_fallback(input_handle, element_shape, element_dtype: element_dtype, num_elements: num_elements, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input_handle"] = input_handle;
+        keywords["element_shape"] = element_shape;
+        keywords["element_dtype"] = element_dtype;
+        keywords["num_elements"] = num_elements;
+        var _op = tf.OpDefLib._apply_op_helper("TensorListStack", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "element_dtype", _op._get_attr_type("element_dtype"), "num_elements", _op._get_attr_int("num_elements") };
+            _execute.record_gradient("TensorListStack", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
+
+    public static Tensor tensor_list_stack_eager_fallback(Tensor input_handle, Tensor element_shape, TF_DataType element_dtype, int num_elements, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input_handle, element_shape };
+        object[] _attrs = new object[] { "element_dtype", element_dtype, "num_elements", num_elements };
+        var _result = _execute.execute("TensorListStack", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("TensorListStack", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
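+
+    // Usage sketch (illustrative, not generated code): pack every element of a list back into one tensor;
+    // with length-2 float elements this yields a [num_elements, 2] result.
+    //
+    //   var stacked = tensor_list_stack(handle, ops.convert_to_tensor(new[] { 2 }), TF_DataType.TF_FLOAT);
+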
+}
diff --git a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
index 3456d9b3d..a8152a11e 100644
--- a/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_math_ops.cs
@@ -2,6 +2,7 @@
 
 using Tensorflow.Eager;
 using Tensorflow.Contexts;
+using Tensorflow.Exceptions;
 using static Tensorflow.Binding;
 
 namespace Tensorflow;
@@ -30,6 +31,10 @@ public static Tensor abs(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Abs", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -96,6 +101,10 @@ public static Tensor accumulate_nv2(Tensors inputs, Shape shape, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AccumulateNV2", name) { args = new object[] { inputs }, attrs = new Dictionary<string, object>() { ["shape"] = shape } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -157,6 +166,10 @@ public static Tensor acos(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Acos", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -217,6 +230,10 @@ public static Tensor acosh(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Acosh", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -278,6 +295,10 @@ public static Tensor add(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Add", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -338,6 +359,10 @@ public static Tensor add_n(Tensors inputs, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AddN", name) { args = new object[] { inputs }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -396,6 +421,10 @@ public static Tensor add_v2(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AddV2", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -460,6 +489,10 @@ public static Tensor all(Tensor input, Tensor reduction_indices, bool keep_dims
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "All", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary<string, object>() { ["keep_dims"] = keep_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -533,6 +566,10 @@ public static Tensor angle(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Angle", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -597,6 +634,10 @@ public static Tensor any(Tensor input, Tensor reduction_indices, bool keep_dims
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Any", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary<string, object>() { ["keep_dims"] = keep_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -650,6 +691,10 @@ public static Tensor approximate_equal(Tensor x, Tensor y, float tolerance = 1E-
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ApproximateEqual", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["tolerance"] = tolerance } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -718,6 +763,10 @@ public static Tensor arg_max(Tensor input, Tensor dimension, TF_DataType output_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ArgMax", name) { args = new object[] { input, dimension }, attrs = new Dictionary<string, object>() { ["output_type"] = output_type } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -786,6 +835,10 @@ public static Tensor arg_min(Tensor input, Tensor dimension, TF_DataType output_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ArgMin", name) { args = new object[] { input, dimension }, attrs = new Dictionary<string, object>() { ["output_type"] = output_type } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -857,6 +910,10 @@ public static Tensor asin(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Asin", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -918,6 +975,10 @@ public static Tensor asinh(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Asinh", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -987,6 +1048,10 @@ public static Tensor atan(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atan", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1055,6 +1120,10 @@ public static Tensor atan2(Tensor y, Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atan2", name) { args = new object[] { y, x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1119,6 +1188,10 @@ public static Tensor atanh(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Atanh", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1201,6 +1274,10 @@ public static Tensor batch_mat_mul(Tensor x, Tensor y, bool adj_x = false, bool
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMul", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["adj_x"] = adj_x, ["adj_y"] = adj_y } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1291,6 +1368,10 @@ public static Tensor batch_mat_mul_v2(Tensor x, Tensor y, bool adj_x = false, bo
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMulV2", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["adj_x"] = adj_x, ["adj_y"] = adj_y } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1386,6 +1467,10 @@ public static Tensor batch_mat_mul_v3(Tensor x, Tensor y, TF_DataType Tout, bool
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchMatMulV3", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout, ["adj_x"] = adj_x, ["adj_y"] = adj_y } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1458,6 +1543,10 @@ public static Tensor betainc(Tensor a, Tensor b, Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Betainc", name) { args = new object[] { a, b, x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1522,6 +1611,10 @@ public static Tensor bincount(Tensor arr, Tensor size, Tensor weights, string? n
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bincount", name) { args = new object[] { arr, size, weights }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1592,6 +1685,10 @@ public static Tensor bucketize(Tensor input, float[] boundaries, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Bucketize", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["boundaries"] = boundaries } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1644,6 +1741,10 @@ public static Tensor cast(Tensor x, TF_DataType DstT, bool Truncate = false, str
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cast", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { ["DstT"] = DstT, ["Truncate"] = Truncate } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1695,6 +1796,10 @@ public static Tensor ceil(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Ceil", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1754,6 +1859,10 @@ public static Tensor clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ClipByValue", name) { args = new object[] { t, clip_value_min, clip_value_max }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1825,6 +1934,10 @@ public static Tensor complex(Tensor real, Tensor imag, TF_DataType Tout = TF_Dat
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Complex", name) { args = new object[] { real, imag }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1892,6 +2005,10 @@ public static Tensor complex_abs(Tensor x, TF_DataType Tout = TF_DataType.TF_FLO
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ComplexAbs", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1959,6 +2076,10 @@ public static Tensor conj(Tensor input, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conj", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2021,6 +2142,10 @@ public static Tensor cos(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cos", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2082,6 +2207,10 @@ public static Tensor cosh(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cosh", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2139,6 +2268,10 @@ public static Tensor cross(Tensor a, Tensor b, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cross", name) { args = new object[] { a, b }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2232,6 +2365,10 @@ public static Tensor cumprod(Tensor x, Tensor axis, bool exclusive = false, bool
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cumprod", name) { args = new object[] { x, axis }, attrs = new Dictionary<string, object>() { ["exclusive"] = exclusive, ["reverse"] = reverse } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2327,6 +2464,10 @@ public static Tensor cumsum(Tensor x, Tensor axis, bool exclusive = false, bool
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Cumsum", name) { args = new object[] { x, axis }, attrs = new Dictionary<string, object>() { ["exclusive"] = exclusive, ["reverse"] = reverse } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2412,6 +2553,10 @@ public static Tensor cumulative_logsumexp(Tensor x, Tensor axis, bool exclusive
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "CumulativeLogsumexp", name) { args = new object[] { x, axis }, attrs = new Dictionary<string, object>() { ["exclusive"] = exclusive, ["reverse"] = reverse } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2482,6 +2627,10 @@ public static Tensor dense_bincount(Tensor input, Tensor size, Tensor weights, b
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DenseBincount", name) { args = new object[] { input, size, weights }, attrs = new Dictionary<string, object>() { ["binary_output"] = binary_output } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2539,6 +2688,10 @@ public static Tensor digamma(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Digamma", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2595,6 +2748,10 @@ public static Tensor div(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Div", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2653,6 +2810,10 @@ public static Tensor div_no_nan(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DivNoNan", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2721,6 +2882,10 @@ public static Tensor equal(Tensor x, Tensor y, bool incompatible_shape_error = t
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Equal", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["incompatible_shape_error"] = incompatible_shape_error } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2772,6 +2937,10 @@ public static Tensor erf(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erf", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2821,6 +2990,10 @@ public static Tensor erfc(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erfc", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2870,6 +3043,10 @@ public static Tensor erfinv(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Erfinv", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2933,6 +3110,10 @@ public static Tensor euclidean_norm(Tensor input, Tensor reduction_indices, bool
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EuclideanNorm", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary<string, object>() { ["keep_dims"] = keep_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3014,6 +3195,10 @@ public static Tensor exp(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Exp", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3080,6 +3265,10 @@ public static Tensor expm1(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Expm1", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3129,6 +3318,10 @@ public static Tensor floor(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Floor", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3185,6 +3378,10 @@ public static Tensor floor_div(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FloorDiv", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3246,6 +3443,10 @@ public static Tensor floor_mod(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FloorMod", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3315,6 +3516,10 @@ public static Tensor greater(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Greater", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3384,6 +3589,10 @@ public static Tensor greater_equal(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "GreaterEqual", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3456,6 +3665,10 @@ public static Tensor histogram_fixed_width(Tensor values, Tensor value_range, Te
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "HistogramFixedWidth", name) { args = new object[] { values, value_range, nbins }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3526,6 +3739,10 @@ public static Tensor igamma(Tensor a, Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Igamma", name) { args = new object[] { a, x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3577,6 +3794,10 @@ public static Tensor igamma_grad_a(Tensor a, Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IgammaGradA", name) { args = new object[] { a, x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3644,6 +3865,10 @@ public static Tensor igammac(Tensor a, Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Igammac", name) { args = new object[] { a, x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3710,6 +3935,10 @@ public static Tensor imag(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Imag", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3765,6 +3994,10 @@ public static Tensor inv(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Inv", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3821,6 +4054,10 @@ public static Tensor inv_grad(Tensor y, Tensor dy, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InvGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3885,6 +4122,10 @@ public static Tensor is_finite(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsFinite", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3948,6 +4189,10 @@ public static Tensor is_inf(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsInf", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4011,6 +4256,10 @@ public static Tensor is_nan(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsNan", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4079,6 +4328,10 @@ public static Tensor less(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Less", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4148,6 +4401,10 @@ public static Tensor less_equal(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LessEqual", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4211,6 +4468,10 @@ public static Tensor lgamma(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Lgamma", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4275,6 +4536,10 @@ public static Tensor lin_space(Tensor start, Tensor stop, Tensor num, string? na
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LinSpace", name) { args = new object[] { start, stop, num }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4338,6 +4603,10 @@ public static Tensor log(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Log", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4399,6 +4668,10 @@ public static Tensor log1p(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Log1p", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4455,6 +4728,10 @@ public static Tensor logical_and(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalAnd", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4505,6 +4782,10 @@ public static Tensor logical_not(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalNot", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4561,6 +4842,10 @@ public static Tensor logical_or(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogicalOr", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4633,6 +4918,10 @@ public static Tensor mat_mul(Tensor a, Tensor b, bool transpose_a = false, bool
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MatMul", name) { args = new object[] { a, b }, attrs = new Dictionary<string, object>() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4699,6 +4988,10 @@ public static Tensor max(Tensor input, Tensor reduction_indices, bool keep_dims
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Max", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary<string, object>() { ["keep_dims"] = keep_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4757,6 +5050,10 @@ public static Tensor maximum(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Maximum", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4821,6 +5118,10 @@ public static Tensor mean(Tensor input, Tensor reduction_indices, bool keep_dims
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mean", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary<string, object>() { ["keep_dims"] = keep_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4886,6 +5187,10 @@ public static Tensor min(Tensor input, Tensor reduction_indices, bool keep_dims
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Min", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary<string, object>() { ["keep_dims"] = keep_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4944,6 +5249,10 @@ public static Tensor minimum(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Minimum", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5004,6 +5313,10 @@ public static Tensor mod(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mod", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5061,6 +5374,10 @@ public static Tensor mul(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Mul", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5118,6 +5435,10 @@ public static Tensor mul_no_nan(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MulNoNan", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5168,6 +5489,10 @@ public static Tensor ndtri(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Ndtri", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5222,6 +5547,10 @@ public static Tensor neg(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Neg", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5283,6 +5612,10 @@ public static Tensor next_after(Tensor x1, Tensor x2, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NextAfter", name) { args = new object[] { x1, x2 }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5341,6 +5674,10 @@ public static Tensor not_equal(Tensor x, Tensor y, bool incompatible_shape_error
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NotEqual", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { ["incompatible_shape_error"] = incompatible_shape_error } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5404,6 +5741,10 @@ public static Tensor polygamma(Tensor a, Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Polygamma", name) { args = new object[] { a, x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5467,6 +5808,10 @@ public static Tensor pow(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Pow", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5531,6 +5876,10 @@ public static Tensor prod(Tensor input, Tensor reduction_indices, bool keep_dims
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Prod", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary<string, object>() { ["keep_dims"] = keep_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5615,6 +5964,10 @@ public static Tensor[] quantize_down_and_shrink_range(Tensor input, Tensor input
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizeDownAndShrinkRange", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5673,6 +6026,10 @@ public static Tensor[] quantized_add(Tensor x, Tensor y, Tensor min_x, Tensor ma
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedAdd", name) { args = new object[] { x, y, min_x, max_x, min_y, max_y }, attrs = new Dictionary<string, object>() { ["Toutput"] = Toutput } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5758,6 +6115,10 @@ public static Tensor[] quantized_mat_mul(Tensor a, Tensor b, Tensor min_a, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMul", name) { args = new object[] { a, b, min_a, max_a, min_b, max_b }, attrs = new Dictionary<string, object>() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["Tactivation"] = Tactivation } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5822,6 +6183,10 @@ public static Tensor[] quantized_mul(Tensor x, Tensor y, Tensor min_x, Tensor ma
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMul", name) { args = new object[] { x, y, min_x, max_x, min_y, max_y }, attrs = new Dictionary<string, object>() { ["Toutput"] = Toutput } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5896,6 +6261,10 @@ public static Tensor ragged_bincount(Tensor splits, Tensor values, Tensor size,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RaggedBincount", name) { args = new object[] { splits, values, size, weights }, attrs = new Dictionary<string, object>() { ["binary_output"] = binary_output } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5966,6 +6335,10 @@ public static Tensor range(Tensor start, Tensor limit, Tensor delta, string? nam
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Range", name) { args = new object[] { start, limit, delta }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6033,6 +6406,10 @@ public static Tensor real(Tensor input, TF_DataType Tout = TF_DataType.TF_FLOAT,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Real", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["Tout"] = Tout } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6092,6 +6469,10 @@ public static Tensor real_div(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RealDiv", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6147,6 +6528,10 @@ public static Tensor reciprocal(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Reciprocal", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6203,6 +6588,10 @@ public static Tensor reciprocal_grad(Tensor y, Tensor dy, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReciprocalGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6263,6 +6652,10 @@ public static Tensor[] requantization_range(Tensor input, Tensor input_min, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizationRange", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6322,6 +6715,10 @@ public static Tensor[] requantization_range_per_channel(Tensor input, Tensor inp
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizationRangePerChannel", name) { args = new object[] { input, input_min, input_max }, attrs = new Dictionary<string, object>() { ["clip_value_max"] = clip_value_max } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6394,6 +6791,10 @@ public static Tensor[] requantize(Tensor input, Tensor input_min, Tensor input_m
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Requantize", name) { args = new object[] { input, input_min, input_max, requested_output_min, requested_output_max }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6457,6 +6858,10 @@ public static Tensor[] requantize_per_channel(Tensor input, Tensor input_min, Te
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RequantizePerChannel", name) { args = new object[] { input, input_min, input_max, requested_output_min, requested_output_max }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6524,6 +6929,10 @@ public static Tensor rint(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rint", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6579,6 +6988,10 @@ public static Tensor round(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Round", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6633,6 +7046,10 @@ public static Tensor rsqrt(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Rsqrt", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6689,6 +7106,10 @@ public static Tensor rsqrt_grad(Tensor y, Tensor dy, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "RsqrtGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6771,6 +7192,10 @@ public static Tensor segment_max(Tensor data, Tensor segment_ids, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMax", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6855,6 +7280,10 @@ public static Tensor segment_mean(Tensor data, Tensor segment_ids, string? name
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMean", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6937,6 +7366,10 @@ public static Tensor segment_min(Tensor data, Tensor segment_ids, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentMin", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7019,6 +7452,10 @@ public static Tensor segment_prod(Tensor data, Tensor segment_ids, string? name
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentProd", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7101,6 +7538,10 @@ public static Tensor segment_sum(Tensor data, Tensor segment_ids, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SegmentSum", name) { args = new object[] { data, segment_ids }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7195,6 +7636,10 @@ public static Tensor select(Tensor condition, Tensor t, Tensor e, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Select", name) { args = new object[] { condition, t, e }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7248,6 +7693,10 @@ public static Tensor select_v2(Tensor condition, Tensor t, Tensor e, string? nam
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SelectV2", name) { args = new object[] { condition, t, e }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7304,6 +7753,10 @@ public static Tensor sigmoid(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sigmoid", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7360,6 +7813,10 @@ public static Tensor sigmoid_grad(Tensor y, Tensor dy, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SigmoidGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7421,6 +7878,10 @@ public static Tensor sign(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sign", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7482,6 +7943,10 @@ public static Tensor sin(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sin", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7543,6 +8008,10 @@ public static Tensor sinh(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sinh", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7605,6 +8074,10 @@ public static Tensor sobol_sample(Tensor dim, Tensor num_results, Tensor skip, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SobolSample", name) { args = new object[] { dim, num_results, skip }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7677,6 +8150,10 @@ public static Tensor sparse_bincount(Tensor indices, Tensor values, Tensor dense
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseBincount", name) { args = new object[] { indices, values, dense_shape, size, weights }, attrs = new Dictionary<string, object>() { ["binary_output"] = binary_output } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7749,6 +8226,10 @@ public static Tensor sparse_mat_mul(Tensor a, Tensor b, bool transpose_a = false
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseMatMul", name) { args = new object[] { a, b }, attrs = new Dictionary<string, object>() { ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["a_is_sparse"] = a_is_sparse, ["b_is_sparse"] = b_is_sparse } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7813,6 +8294,10 @@ public static Tensor sparse_segment_mean(Tensor data, Tensor indices, Tensor seg
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMean", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7873,6 +8358,10 @@ public static Tensor sparse_segment_mean_grad(Tensor grad, Tensor indices, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMeanGrad", name) { args = new object[] { grad, indices, segment_ids, output_dim0 }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7938,6 +8427,10 @@ public static Tensor sparse_segment_mean_with_num_segments(Tensor data, Tensor i
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentMeanWithNumSegments", name) { args = new object[] { data, indices, segment_ids, num_segments }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8000,6 +8493,10 @@ public static Tensor sparse_segment_sqrt_n(Tensor data, Tensor indices, Tensor s
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSqrtN", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8086,6 +8583,10 @@ public static Tensor sparse_segment_sum(Tensor data, Tensor indices, Tensor segm
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSum", name) { args = new object[] { data, indices, segment_ids }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8146,6 +8647,10 @@ public static Tensor sparse_segment_sum_grad(Tensor grad, Tensor indices, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSumGrad", name) { args = new object[] { grad, indices, segment_ids, output_dim0 }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8232,6 +8737,10 @@ public static Tensor sparse_segment_sum_with_num_segments(Tensor data, Tensor in
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSegmentSumWithNumSegments", name) { args = new object[] { data, indices, segment_ids, num_segments }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8289,6 +8798,10 @@ public static Tensor sqrt(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sqrt", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8345,6 +8858,10 @@ public static Tensor sqrt_grad(Tensor y, Tensor dy, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SqrtGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8400,6 +8917,10 @@ public static Tensor square(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Square", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8456,6 +8977,10 @@ public static Tensor squared_difference(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SquaredDifference", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8513,6 +9038,10 @@ public static Tensor sub(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sub", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8577,6 +9106,10 @@ public static Tensor sum(Tensor input, Tensor reduction_indices, bool keep_dims
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Sum", name) { args = new object[] { input, reduction_indices }, attrs = new Dictionary<string, object>() { ["keep_dims"] = keep_dims } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8641,6 +9174,10 @@ public static Tensor tan(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tan", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8704,6 +9241,10 @@ public static Tensor tanh(Tensor x, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Tanh", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8760,6 +9301,10 @@ public static Tensor tanh_grad(Tensor y, Tensor dy, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TanhGrad", name) { args = new object[] { y, dy }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8822,6 +9367,10 @@ public static Tensor truncate_div(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TruncateDiv", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8882,6 +9431,10 @@ public static Tensor truncate_mod(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TruncateMod", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8973,6 +9526,10 @@ public static Tensor unsorted_segment_max(Tensor data, Tensor segment_ids, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentMax", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9060,6 +9617,10 @@ public static Tensor unsorted_segment_min(Tensor data, Tensor segment_ids, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentMin", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9146,6 +9707,10 @@ public static Tensor unsorted_segment_prod(Tensor data, Tensor segment_ids, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentProd", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9236,6 +9801,10 @@ public static Tensor unsorted_segment_sum(Tensor data, Tensor segment_ids, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "UnsortedSegmentSum", name) { args = new object[] { data, segment_ids, num_segments }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9288,6 +9857,10 @@ public static Tensor xdivy(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xdivy", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9339,6 +9912,10 @@ public static Tensor xlog1py(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xlog1py", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9390,6 +9967,10 @@ public static Tensor xlogy(Tensor x, Tensor y, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Xlogy", name) { args = new object[] { x, y }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -9449,6 +10030,10 @@ public static Tensor zeta(Tensor x, Tensor q, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Zeta", name) { args = new object[] { x, q }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
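The hunks above, and those for gen_nn_ops.cs below, all apply the same mechanical change to the generated eager-op wrappers: a `catch (NotOkStatusException ex) { throw ex; }` clause is inserted ahead of the pre-existing blanket `catch (Exception) { }`, so a non-OK status reported by the TensorFlow runtime is surfaced to the caller instead of being swallowed and silently retried on the slower fallback path. The stand-alone sketch below only illustrates that catch ordering under that reading of the patch; the names `Demo`, `RunFastPath`, and `RunFallback` are placeholders rather than the library's API, and the parameterless `throw;` form would additionally preserve the original stack trace.

    using System;

    // Placeholder stand-in for Tensorflow.Exceptions.NotOkStatusException.
    class NotOkStatusException : Exception
    {
        public NotOkStatusException(string message) : base(message) { }
    }

    static class Demo
    {
        // Hypothetical stand-ins for the eager fast path and the graph-mode fallback.
        static string RunFastPath(bool failWithStatus)
        {
            if (failWithStatus)
                throw new NotOkStatusException("op returned a non-OK status");
            throw new InvalidOperationException("fast path unavailable");
        }

        static string RunFallback() => "result from fallback path";

        // Mirrors the catch ordering added to every generated op wrapper.
        public static string Execute(bool failWithStatus)
        {
            try
            {
                return RunFastPath(failWithStatus);
            }
            catch (NotOkStatusException ex)
            {
                // Runtime status errors are rethrown instead of being swallowed below.
                throw ex;
            }
            catch (Exception)
            {
                // Any other failure still falls through to the fallback path.
            }
            return RunFallback();
        }

        static void Main()
        {
            Console.WriteLine(Execute(failWithStatus: false));   // prints the fallback result
            try { Execute(failWithStatus: true); }
            catch (NotOkStatusException ex) { Console.WriteLine("rethrown: " + ex.Message); }
        }
    }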
diff --git a/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs b/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs
index c0cec2785..59c740c46 100644
--- a/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_nn_ops.cs
@@ -2,6 +2,7 @@
 
 using Tensorflow.Eager;
 using Tensorflow.Contexts;
+using Tensorflow.Exceptions;
 using static Tensorflow.Binding;
 
 namespace Tensorflow;
@@ -57,6 +58,10 @@ public static Tensor[] approx_top_k(Tensor input, int k = 0, int reduction_dimen
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ApproxTopK", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["k"] = k, ["reduction_dimension"] = reduction_dimension, ["recall_target"] = recall_target, ["is_max_k"] = is_max_k, ["reduction_input_size_override"] = reduction_input_size_override, ["aggregate_to_topk"] = aggregate_to_topk } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -142,6 +147,10 @@ public static Tensor avg_pool(Tensor value, int[] ksize, int[] strides, string p
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool", name) { args = new object[] { value }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -231,6 +240,10 @@ public static Tensor avg_pool3d(Tensor input, int[] ksize, int[] strides, string
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool3D", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -315,6 +328,10 @@ public static Tensor avg_pool3d_grad(Tensor orig_input_shape, Tensor grad, int[]
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPool3DGrad", name) { args = new object[] { orig_input_shape, grad }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -398,6 +415,10 @@ public static Tensor avg_pool_grad(Tensor orig_input_shape, Tensor grad, int[] k
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AvgPoolGrad", name) { args = new object[] { orig_input_shape, grad }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -476,6 +497,10 @@ public static Tensor batch_norm_with_global_normalization(Tensor t, Tensor m, Te
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchNormWithGlobalNormalization", name) { args = new object[] { t, m, v, beta, gamma }, attrs = new Dictionary<string, object>() { ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -551,6 +576,10 @@ public static Tensor[] batch_norm_with_global_normalization_grad(Tensor t, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BatchNormWithGlobalNormalizationGrad", name) { args = new object[] { t, m, v, gamma, backprop }, attrs = new Dictionary<string, object>() { ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -624,6 +653,10 @@ public static Tensor bias_add(Tensor value, Tensor bias, string data_format = "N
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAdd", name) { args = new object[] { value, bias }, attrs = new Dictionary<string, object>() { ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -697,6 +730,10 @@ public static Tensor bias_add_grad(Tensor out_backprop, string data_format = "NH
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAddGrad", name) { args = new object[] { out_backprop }, attrs = new Dictionary<string, object>() { ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -760,6 +797,10 @@ public static Tensor bias_add_v1(Tensor value, Tensor bias, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "BiasAddV1", name) { args = new object[] { value, bias }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -883,6 +924,10 @@ public static Tensor conv2d(Tensor input, Tensor filter, int[] strides, string p
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2D", name) { args = new object[] { input, filter }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -992,6 +1037,10 @@ public static Tensor conv2d_backprop_filter(Tensor input, Tensor filter_sizes, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2DBackpropFilter", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1102,6 +1151,10 @@ public static Tensor conv2d_backprop_input(Tensor input_sizes, Tensor filter, Te
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv2DBackpropInput", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["use_cudnn_on_gpu"] = use_cudnn_on_gpu, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1206,6 +1259,10 @@ public static Tensor conv3d(Tensor input, Tensor filter, int[] strides, string p
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3D", name) { args = new object[] { input, filter }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1282,6 +1339,10 @@ public static Tensor conv3d_backprop_filter(Tensor input, Tensor filter, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropFilter", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1371,6 +1432,10 @@ public static Tensor conv3d_backprop_filter_v2(Tensor input, Tensor filter_sizes
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropFilterV2", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1448,6 +1513,10 @@ public static Tensor conv3d_backprop_input(Tensor input, Tensor filter, Tensor o
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropInput", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1537,6 +1606,10 @@ public static Tensor conv3d_backprop_input_v2(Tensor input_sizes, Tensor filter,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Conv3DBackpropInputV2", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1611,6 +1684,10 @@ public static Tensor data_format_dim_map(Tensor x, string src_format = "NHWC", s
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DataFormatDimMap", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { ["src_format"] = src_format, ["dst_format"] = dst_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1715,6 +1792,10 @@ public static Tensor data_format_vec_permute(Tensor x, string src_format = "NHWC
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DataFormatVecPermute", name) { args = new object[] { x }, attrs = new Dictionary<string, object>() { ["src_format"] = src_format, ["dst_format"] = dst_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1835,6 +1916,10 @@ public static Tensor depthwise_conv2d_native(Tensor input, Tensor filter, int[]
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNative", name) { args = new object[] { input, filter }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -1934,6 +2019,10 @@ public static Tensor depthwise_conv2d_native_backprop_filter(Tensor input, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNativeBackpropFilter", name) { args = new object[] { input, filter_sizes, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2034,6 +2123,10 @@ public static Tensor depthwise_conv2d_native_backprop_input(Tensor input_sizes,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DepthwiseConv2dNativeBackpropInput", name) { args = new object[] { input_sizes, filter, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format, ["dilations"] = dilations } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2139,6 +2232,10 @@ public static Tensor dilation2d(Tensor input, Tensor filter, int[] strides, int[
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2D", name) { args = new object[] { input, filter }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2211,6 +2308,10 @@ public static Tensor dilation2d_backprop_filter(Tensor input, Tensor filter, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2DBackpropFilter", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2284,6 +2385,10 @@ public static Tensor dilation2d_backprop_input(Tensor input, Tensor filter, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Dilation2DBackpropInput", name) { args = new object[] { input, filter, out_backprop }, attrs = new Dictionary<string, object>() { ["strides"] = strides, ["rates"] = rates, ["padding"] = padding } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2358,6 +2463,10 @@ public static Tensor elu(Tensor features, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Elu", name) { args = new object[] { features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2408,6 +2517,10 @@ public static Tensor elu_grad(Tensor gradients, Tensor outputs, string? name = n
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "EluGrad", name) { args = new object[] { gradients, outputs }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2516,6 +2629,10 @@ public static Tensor[] fractional_avg_pool(Tensor value, float[] pooling_ratio,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalAvgPool", name) { args = new object[] { value }, attrs = new Dictionary<string, object>() { ["pooling_ratio"] = pooling_ratio, ["pseudo_random"] = pseudo_random, ["overlapping"] = overlapping, ["deterministic"] = deterministic, ["seed"] = seed, ["seed2"] = seed2 } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2596,6 +2713,10 @@ public static Tensor fractional_avg_pool_grad(Tensor orig_input_tensor_shape, Te
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalAvgPoolGrad", name) { args = new object[] { orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence }, attrs = new Dictionary<string, object>() { ["overlapping"] = overlapping } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2731,6 +2852,10 @@ public static Tensor[] fractional_max_pool(Tensor value, float[] pooling_ratio,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalMaxPool", name) { args = new object[] { value }, attrs = new Dictionary<string, object>() { ["pooling_ratio"] = pooling_ratio, ["pseudo_random"] = pseudo_random, ["overlapping"] = overlapping, ["deterministic"] = deterministic, ["seed"] = seed, ["seed2"] = seed2 } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2803,6 +2928,10 @@ public static Tensor fractional_max_pool_grad(Tensor orig_input, Tensor orig_out
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FractionalMaxPoolGrad", name) { args = new object[] { orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence }, attrs = new Dictionary<string, object>() { ["overlapping"] = overlapping } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2884,6 +3013,10 @@ public static Tensor[] fused_batch_norm(Tensor x, Tensor scale, Tensor offset, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNorm", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary<string, object>() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -2972,6 +3105,10 @@ public static Tensor[] fused_batch_norm_grad(Tensor y_backprop, Tensor x, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGrad", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }, attrs = new Dictionary<string, object>() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3059,6 +3196,10 @@ public static Tensor[] fused_batch_norm_grad_v2(Tensor y_backprop, Tensor x, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGradV2", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2 }, attrs = new Dictionary<string, object>() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3147,6 +3288,10 @@ public static Tensor[] fused_batch_norm_grad_v3(Tensor y_backprop, Tensor x, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormGradV3", name) { args = new object[] { y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3 }, attrs = new Dictionary<string, object>() { ["epsilon"] = epsilon, ["data_format"] = data_format, ["is_training"] = is_training } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3235,6 +3380,10 @@ public static Tensor[] fused_batch_norm_v2(Tensor x, Tensor scale, Tensor offset
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormV2", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary<string, object>() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3323,6 +3472,10 @@ public static Tensor[] fused_batch_norm_v3(Tensor x, Tensor scale, Tensor offset
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedBatchNormV3", name) { args = new object[] { x, scale, offset, mean, variance }, attrs = new Dictionary<string, object>() { ["epsilon"] = epsilon, ["exponential_avg_factor"] = exponential_avg_factor, ["data_format"] = data_format, ["is_training"] = is_training } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3413,6 +3566,10 @@ public static Tensor fused_pad_conv2d(Tensor input, Tensor paddings, Tensor filt
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedPadConv2D", name) { args = new object[] { input, paddings, filter }, attrs = new Dictionary<string, object>() { ["mode"] = mode, ["strides"] = strides, ["padding"] = padding } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3502,6 +3659,10 @@ public static Tensor fused_resize_and_pad_conv2d(Tensor input, Tensor size, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "FusedResizeAndPadConv2D", name) { args = new object[] { input, size, paddings, filter }, attrs = new Dictionary<string, object>() { ["resize_align_corners"] = resize_align_corners, ["mode"] = mode, ["strides"] = strides, ["padding"] = padding } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3582,6 +3743,10 @@ public static Tensor in_top_k(Tensor predictions, Tensor targets, int k = 0, str
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InTopK", name) { args = new object[] { predictions, targets }, attrs = new Dictionary<string, object>() { ["k"] = k } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3653,6 +3818,10 @@ public static Tensor in_top_kv2(Tensor predictions, Tensor targets, Tensor k, st
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "InTopKV2", name) { args = new object[] { predictions, targets, k }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3707,6 +3876,10 @@ public static Tensor[] isotonic_regression(Tensor input, TF_DataType output_dtyp
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "IsotonicRegression", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["output_dtype"] = output_dtype } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3792,6 +3965,10 @@ public static Tensor lrn(Tensor input, int depth_radius = 5, float bias = 1f, fl
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LRN", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["depth_radius"] = depth_radius, ["bias"] = bias, ["alpha"] = alpha, ["beta"] = beta } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3846,6 +4023,10 @@ public static Tensor leaky_relu(Tensor features, float alpha = 0.2f, string? nam
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LeakyRelu", name) { args = new object[] { features }, attrs = new Dictionary<string, object>() { ["alpha"] = alpha } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3898,6 +4079,10 @@ public static Tensor leaky_relu_grad(Tensor gradients, Tensor features, float al
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LeakyReluGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary<string, object>() { ["alpha"] = alpha } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -3956,6 +4141,10 @@ public static Tensor log_softmax(Tensor logits, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "LogSoftmax", name) { args = new object[] { logits }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4035,6 +4224,10 @@ public static Tensor max_pool(Tensor input, int[] ksize, int[] strides, string p
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4119,6 +4312,10 @@ public static Tensor max_pool3d(Tensor input, int[] ksize, int[] strides, string
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3D", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4204,6 +4401,10 @@ public static Tensor max_pool3d_grad(Tensor orig_input, Tensor orig_output, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3DGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4291,6 +4492,10 @@ public static Tensor max_pool3d_grad_grad(Tensor orig_input, Tensor orig_output,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPool3DGradGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4382,6 +4587,10 @@ public static Tensor max_pool_grad(Tensor orig_input, Tensor orig_output, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["explicit_paddings"] = explicit_paddings, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4469,6 +4678,10 @@ public static Tensor max_pool_grad_grad(Tensor orig_input, Tensor orig_output, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGrad", name) { args = new object[] { orig_input, orig_output, grad }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4546,6 +4759,10 @@ public static Tensor max_pool_grad_grad_v2(Tensor orig_input, Tensor orig_output
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGradV2", name) { args = new object[] { orig_input, orig_output, grad, ksize, strides }, attrs = new Dictionary<string, object>() { ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4628,6 +4845,10 @@ public static Tensor max_pool_grad_grad_with_argmax(Tensor input, Tensor grad, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradGradWithArgmax", name) { args = new object[] { input, grad, argmax }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4701,6 +4922,10 @@ public static Tensor max_pool_grad_v2(Tensor orig_input, Tensor orig_output, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradV2", name) { args = new object[] { orig_input, orig_output, grad, ksize, strides }, attrs = new Dictionary<string, object>() { ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4783,6 +5008,10 @@ public static Tensor max_pool_grad_with_argmax(Tensor input, Tensor grad, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolGradWithArgmax", name) { args = new object[] { input, grad, argmax }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4854,6 +5083,10 @@ public static Tensor max_pool_v2(Tensor input, Tensor ksize, Tensor strides, str
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolV2", name) { args = new object[] { input, ksize, strides }, attrs = new Dictionary<string, object>() { ["padding"] = padding, ["data_format"] = data_format } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -4946,6 +5179,10 @@ public static Tensor[] max_pool_with_argmax(Tensor input, int[] ksize, int[] str
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MaxPoolWithArgmax", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["Targmax"] = Targmax, ["padding"] = padding, ["include_batch_in_index"] = include_batch_in_index } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5018,6 +5255,10 @@ public static Tensor nth_element(Tensor input, Tensor n, bool reverse = false, s
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "NthElement", name) { args = new object[] { input, n }, attrs = new Dictionary<string, object>() { ["reverse"] = reverse } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5088,6 +5329,10 @@ public static Tensor[] quantized_avg_pool(Tensor input, Tensor min_input, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedAvgPool", name) { args = new object[] { input, min_input, max_input }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5174,6 +5419,10 @@ public static Tensor[] quantized_batch_norm_with_global_normalization(Tensor t,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedBatchNormWithGlobalNormalization", name) { args = new object[] { t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["variance_epsilon"] = variance_epsilon, ["scale_after_normalization"] = scale_after_normalization } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5251,6 +5500,10 @@ public static Tensor[] quantized_bias_add(Tensor input, Tensor bias, Tensor min_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedBiasAdd", name) { args = new object[] { input, bias, min_input, max_input, min_bias, max_bias }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5344,6 +5597,10 @@ public static Tensor[] quantized_conv2d(Tensor input, Tensor filter, Tensor min_
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2D", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5420,6 +5677,10 @@ public static Tensor[] quantized_conv2d_and_relu(Tensor input, Tensor filter, Te
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndRelu", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5499,6 +5760,10 @@ public static Tensor[] quantized_conv2d_and_relu_and_requantize(Tensor input, Te
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndReluAndRequantize", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5580,6 +5845,10 @@ public static Tensor[] quantized_conv2d_and_requantize(Tensor input, Tensor filt
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DAndRequantize", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5662,6 +5931,10 @@ public static Tensor[] quantized_conv2d_per_channel(Tensor input, Tensor filter,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DPerChannel", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5739,6 +6012,10 @@ public static Tensor[] quantized_conv2d_with_bias(Tensor input, Tensor filter, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBias", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5818,6 +6095,10 @@ public static Tensor[] quantized_conv2d_with_bias_and_relu(Tensor input, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5899,6 +6180,10 @@ public static Tensor[] quantized_conv2d_with_bias_and_relu_and_requantize(Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -5982,6 +6267,10 @@ public static Tensor[] quantized_conv2d_with_bias_and_requantize(Tensor input, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6068,6 +6357,10 @@ public static Tensor[] quantized_conv2d_with_bias_signed_sum_and_relu_and_requan
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6153,6 +6446,10 @@ public static Tensor[] quantized_conv2d_with_bias_sum_and_relu(Tensor input, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSumAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, summand }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6238,6 +6535,10 @@ public static Tensor[] quantized_conv2d_with_bias_sum_and_relu_and_requantize(Te
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedConv2DWithBiasSumAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output, summand, min_summand, max_summand }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6322,6 +6623,10 @@ public static Tensor[] quantized_depthwise_conv2d(Tensor input, Tensor filter, T
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2D", name) { args = new object[] { input, filter, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6400,6 +6705,10 @@ public static Tensor[] quantized_depthwise_conv2d_with_bias(Tensor input, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBias", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6484,6 +6793,10 @@ public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu(Tensor inpu
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBiasAndRelu", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6571,6 +6884,10 @@ public static Tensor[] quantized_depthwise_conv2d_with_bias_and_relu_and_requant
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", name) { args = new object[] { input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type, ["strides"] = strides, ["padding"] = padding, ["dilations"] = dilations, ["padding_list"] = padding_list } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6660,6 +6977,10 @@ public static Tensor[] quantized_mat_mul_with_bias(Tensor a, Tensor b, Tensor bi
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBias", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b }, attrs = new Dictionary<string, object>() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6735,6 +7056,10 @@ public static Tensor quantized_mat_mul_with_bias_and_dequantize(Tensor a, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndDequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary<string, object>() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6828,6 +7153,10 @@ public static Tensor[] quantized_mat_mul_with_bias_and_relu(Tensor a, Tensor b,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndRelu", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b }, attrs = new Dictionary<string, object>() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6922,6 +7251,10 @@ public static Tensor[] quantized_mat_mul_with_bias_and_relu_and_requantize(Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndReluAndRequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary<string, object>() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -6999,6 +7332,10 @@ public static Tensor[] quantized_mat_mul_with_bias_and_requantize(Tensor a, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMatMulWithBiasAndRequantize", name) { args = new object[] { a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output }, attrs = new Dictionary<string, object>() { ["Toutput"] = Toutput, ["transpose_a"] = transpose_a, ["transpose_b"] = transpose_b, ["input_quant_mode"] = input_quant_mode } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7083,6 +7420,10 @@ public static Tensor[] quantized_max_pool(Tensor input, Tensor min_input, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedMaxPool", name) { args = new object[] { input, min_input, max_input }, attrs = new Dictionary<string, object>() { ["ksize"] = ksize, ["strides"] = strides, ["padding"] = padding } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7140,6 +7481,10 @@ public static Tensor[] quantized_relu(Tensor features, Tensor min_features, Tens
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedRelu", name) { args = new object[] { features, min_features, max_features }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7195,6 +7540,10 @@ public static Tensor[] quantized_relu6(Tensor features, Tensor min_features, Ten
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedRelu6", name) { args = new object[] { features, min_features, max_features }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7251,6 +7600,10 @@ public static Tensor[] quantized_relu_x(Tensor features, Tensor max_value, Tenso
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "QuantizedReluX", name) { args = new object[] { features, max_value, min_features, max_features }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7312,6 +7665,10 @@ public static Tensor relu(Tensor features, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Relu", name) { args = new object[] { features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7361,6 +7718,10 @@ public static Tensor relu6(Tensor features, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Relu6", name) { args = new object[] { features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7411,6 +7772,10 @@ public static Tensor relu_grad(Tensor gradients, Tensor features, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReluGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7472,6 +7837,10 @@ public static Tensor selu(Tensor features, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Selu", name) { args = new object[] { features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7522,6 +7891,10 @@ public static Tensor selu_grad(Tensor gradients, Tensor outputs, string? name =
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SeluGrad", name) { args = new object[] { gradients, outputs }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7579,6 +7952,10 @@ public static Tensor softmax(Tensor logits, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softmax", name) { args = new object[] { logits }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7634,6 +8011,10 @@ public static Tensor[] softmax_cross_entropy_with_logits(Tensor features, Tensor
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftmaxCrossEntropyWithLogits", name) { args = new object[] { features, labels }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7684,6 +8065,10 @@ public static Tensor softplus(Tensor features, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softplus", name) { args = new object[] { features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7734,6 +8119,10 @@ public static Tensor softplus_grad(Tensor gradients, Tensor features, string? na
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftplusGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7784,6 +8173,10 @@ public static Tensor softsign(Tensor features, string? name = null)
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "Softsign", name) { args = new object[] { features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7834,6 +8227,10 @@ public static Tensor softsign_grad(Tensor gradients, Tensor features, string? na
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SoftsignGrad", name) { args = new object[] { gradients, features }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result[0];
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7895,6 +8292,10 @@ public static Tensor[] sparse_softmax_cross_entropy_with_logits(Tensor features,
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "SparseSoftmaxCrossEntropyWithLogits", name) { args = new object[] { features, labels }, attrs = new Dictionary<string, object>() { } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -7973,6 +8374,10 @@ public static Tensor[] top_k(Tensor input, int k = 0, bool sorted = true, string
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TopK", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["k"] = k, ["sorted"] = sorted } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
@@ -8045,6 +8450,10 @@ public static Tensor[] top_kv2(Tensor input, Tensor k, bool sorted = true, strin
                 var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "TopKV2", name) { args = new object[] { input, k }, attrs = new Dictionary<string, object>() { ["sorted"] = sorted } });
                 return _fast_path_result;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
             catch (Exception)
             {
             }
diff --git a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs
index c4e8f8c41..db5f6813c 100644
--- a/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/gen_resource_variable_ops.cs
@@ -1,158 +1,1523 @@
-/*****************************************************************************
-   Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-******************************************************************************/
+/*Wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit.*/
 
+using Tensorflow.Eager;
+using Tensorflow.Contexts;
+using Tensorflow.Exceptions;
 using static Tensorflow.Binding;
 
-namespace Tensorflow
+namespace Tensorflow;
+
+public static class gen_resource_variable_ops
 {
-    public static class gen_resource_variable_ops
+    /// <summary>
+    /// Adds a value to the current value of a variable.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Any ReadVariableOp with a control dependency on this op is guaranteed to
+    /// see the incremented value or a subsequent newer one.
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="value"></param>
+    /// <returns></returns>
+    public static Operation assign_add_variable_op(Tensor resource, Tensor value, string? name = null)
     {
-        public static Operation assign_sub_variable_op(Tensor resource, Tensor value, string name = null)
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
         {
-            if (tf.Context.executing_eagerly())
+            try
             {
-                tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(
-                    tf.Context, "AssignSubVariableOp", name, resource, value));
-
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AssignAddVariableOp", name) { args = new object[] { resource, value }, attrs = new Dictionary<string, object>() { } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return assign_add_variable_op_eager_fallback(resource, value, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["value"] = value;
+        var _op = tf.OpDefLib._apply_op_helper("AssignAddVariableOp", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype") };
+            _execute.record_gradient("AssignAddVariableOp", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-            return null;
+    public static Operation assign_add_variable_op_eager_fallback(Tensor resource, Tensor value, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, value };
+        object[] _attrs = new object[] { "dtype", value.dtype };
+        var _result = _execute.execute("AssignAddVariableOp", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("AssignAddVariableOp", _inputs_flat, _attrs, _result);
         }
+        return null;
+    }
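+
+    // Illustrative usage (sketch): incrementing a variable in eager mode. Assumes `handle`
+    // is an initialized TF_FLOAT scalar resource tensor, e.g. produced by var_handle_op and
+    // assign_variable_op further down in this class.
+    //
+    //   gen_resource_variable_ops.assign_add_variable_op(handle, tf.constant(2.0f));
+    //   var value = gen_resource_variable_ops.read_variable_op(handle, TF_DataType.TF_FLOAT);
+    //
+    // assign_sub_variable_op below follows the same pattern for decrements.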
+    /// <summary>
+    /// Subtracts a value from the current value of a variable.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Any ReadVariableOp with a control dependency on this op is guaranteed to
+    /// see the decremented value or a subsequent newer one.
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="value"></param>
+    /// <returns></returns>
+    public static Operation assign_sub_variable_op(Tensor resource, Tensor value, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AssignSubVariableOp", name) { args = new object[] { resource, value }, attrs = new Dictionary<string, object>() { } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return assign_sub_variable_op_eager_fallback(resource, value, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["value"] = value;
+        var _op = tf.OpDefLib._apply_op_helper("AssignSubVariableOp", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype") };
+            _execute.record_gradient("AssignSubVariableOp", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-        /// <summary>
-        /// Adds a value to the current value of a variable.
-        /// </summary>
-        /// <param name="resource"></param>
-        /// <param name="value"></param>
-        /// <param name="name"></param>
-        /// <returns></returns>
-        public static Operation assign_add_variable_op(Tensor resource, Tensor value, string name = null)
+    public static Operation assign_sub_variable_op_eager_fallback(Tensor resource, Tensor value, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, value };
+        object[] _attrs = new object[] { "dtype", value.dtype };
+        var _result = _execute.execute("AssignSubVariableOp", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("AssignSubVariableOp", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
+    /// <summary>
+    /// Assigns a new value to a variable.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Any ReadVariableOp with a control dependency on this op is guaranteed to return
+    /// this value or a subsequent newer value of the variable.
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="value"></param>
+    /// <param name="validate_shape"></param>
+    /// <returns></returns>
+    public static Operation assign_variable_op(Tensor resource, Tensor value, bool validate_shape = false, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
         {
-            if (tf.Context.executing_eagerly())
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "AssignVariableOp", name) { args = new object[] { resource, value }, attrs = new Dictionary<string, object>() { ["validate_shape"] = validate_shape } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
             {
-                tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AssignAddVariableOp", name,
-                    resource, value));
+            }
+            try
+            {
+                return assign_variable_op_eager_fallback(resource, value, validate_shape: validate_shape, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["value"] = value;
+        keywords["validate_shape"] = validate_shape;
+        var _op = tf.OpDefLib._apply_op_helper("AssignVariableOp", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "validate_shape", _op._get_attr_bool("validate_shape") };
+            _execute.record_gradient("AssignVariableOp", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
+    public static Operation assign_variable_op_eager_fallback(Tensor resource, Tensor value, bool validate_shape, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, value };
+        object[] _attrs = new object[] { "dtype", value.dtype, "validate_shape", validate_shape };
+        var _result = _execute.execute("AssignVariableOp", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("AssignVariableOp", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
+    /// <summary>
+    /// This op consumes a lock created by `MutexLock`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This op exists to consume a tensor created by `MutexLock` (other than
+    /// direct control dependencies). It should be the only op that consumes the tensor,
+    /// and will raise an error if it is not.  Its only purpose is to keep the
+    /// mutex lock tensor alive until it is consumed by this op.
+    /// 
+    /// **NOTE**: This operation must run on the same device as its input.  This may
+    /// be enforced via the `colocate_with` mechanism.
+    /// 
+    /// </remarks>
+    /// <param name="mutex_lock"></param>
+    /// <returns></returns>
+    public static Operation consume_mutex_lock(Tensor mutex_lock, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ConsumeMutexLock", name) { args = new object[] { mutex_lock }, attrs = new Dictionary<string, object>() { } });
                 return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return consume_mutex_lock_eager_fallback(mutex_lock, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["mutex_lock"] = mutex_lock;
+        var _op = tf.OpDefLib._apply_op_helper("ConsumeMutexLock", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { };
+            _execute.record_gradient("ConsumeMutexLock", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-            var _op = tf.OpDefLib._apply_op_helper("AssignAddVariableOp", name, new { resource, value });
+    public static Operation consume_mutex_lock_eager_fallback(Tensor mutex_lock, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { mutex_lock };
+        object[] _attrs = new object[] { };
+        var _result = _execute.execute("ConsumeMutexLock", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ConsumeMutexLock", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
+    /// <summary>
+    /// Deletes the resource specified by the handle.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// All subsequent operations using the resource will result in a NotFound
+    /// error status.
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="ignore_lookup_error">
+    /// 
+    /// whether to ignore the error when the resource
+    /// doesn't exist.
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Operation destroy_resource_op(Tensor resource, bool ignore_lookup_error = true, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DestroyResourceOp", name) { args = new object[] { resource }, attrs = new Dictionary<string, object>() { ["ignore_lookup_error"] = ignore_lookup_error } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return destroy_resource_op_eager_fallback(resource, ignore_lookup_error: ignore_lookup_error, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["ignore_lookup_error"] = ignore_lookup_error;
+        var _op = tf.OpDefLib._apply_op_helper("DestroyResourceOp", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "ignore_lookup_error", _op._get_attr_bool("ignore_lookup_error") };
+            _execute.record_gradient("DestroyResourceOp", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-            return _op;
+    public static Operation destroy_resource_op_eager_fallback(Tensor resource, bool ignore_lookup_error, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource };
+        object[] _attrs = new object[] { "ignore_lookup_error", ignore_lookup_error };
+        var _result = _execute.execute("DestroyResourceOp", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("DestroyResourceOp", _inputs_flat, _attrs, _result);
         }
+        return null;
+    }
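+
+    // Illustrative usage (sketch): releasing the resource behind a variable handle once it
+    // is no longer needed. Assumes `handle` came from var_handle_op; with the default
+    // ignore_lookup_error: true, destroying an already-deleted handle simply does nothing.
+    //
+    //   gen_resource_variable_ops.destroy_resource_op(handle, ignore_lookup_error: true);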
+    /// <summary>
+    /// Turns off the copy-on-read mode.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// Turns off the copy-on-read mode of a resource variable. If the variable is not in copy-on-read mode, this op has no effect.  
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <returns></returns>
+    public static Operation disable_copy_on_read(Tensor resource, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "DisableCopyOnRead", name) { args = new object[] { resource }, attrs = new Dictionary<string, object>() { } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return disable_copy_on_read_eager_fallback(resource, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        var _op = tf.OpDefLib._apply_op_helper("DisableCopyOnRead", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { };
+            _execute.record_gradient("DisableCopyOnRead", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-        public static Operation assign_variable_op(Tensor resource, Tensor value, string name = null)
+    public static Operation disable_copy_on_read_eager_fallback(Tensor resource, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource };
+        object[] _attrs = new object[] { };
+        var _result = _execute.execute("DisableCopyOnRead", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
         {
-            if (tf.Context.executing_eagerly())
+            _execute.record_gradient("DisableCopyOnRead", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
+    /// <summary>
+    /// Locks a mutex resource. The output is the lock.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// So long as the lock tensor is alive, any other request to use `MutexLock` with this mutex will wait.
+    /// 
+    /// This is particularly useful for creating a critical section when used in
+    /// conjunction with `MutexLockIdentity`:
+    /// 
+    /// ```python
+    /// 
+    /// mutex = mutex_v2(
+    ///   shared_name=handle_name, container=container, name=name)
+    /// 
+    /// def execute_in_critical_section(fn, *args, **kwargs):
+    ///   lock = gen_resource_variable_ops.mutex_lock(mutex)
+    /// 
+    ///   with ops.control_dependencies([lock]):
+    ///     r = fn(*args, **kwargs)
+    /// 
+    ///   with ops.control_dependencies(nest.flatten(r)):
+    ///     with ops.colocate_with(mutex):
+    ///       ensure_lock_exists = mutex_lock_identity(lock)
+    /// 
+    ///     # Make sure that if any element of r is accessed, all of
+    ///     # them are executed together.
+    ///     r = nest.map_structure(tf.identity, r)
+    /// 
+    ///   with ops.control_dependencies([ensure_lock_exists]):
+    ///     return nest.map_structure(tf.identity, r)
+    /// ```
+    /// 
+    /// While `fn` is running in the critical section, no other functions which wish to
+    /// use this critical section may run.
+    /// 
+    /// Often the use case is that two executions of the same graph, in parallel,
+    /// wish to run `fn`; and we wish to ensure that only one of them executes
+    /// at a time.  This is especially important if `fn` modifies one or more
+    /// variables at a time.
+    /// 
+    /// It is also useful if two separate functions must share a resource, but we
+    /// wish to ensure the usage is exclusive.
+    /// 
+    /// </remarks>
+    /// <param name="mutex"></param>
+    /// <returns></returns>
+    public static Tensor mutex_lock(Tensor mutex, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MutexLock", name) { args = new object[] { mutex }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
             {
-                tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "AssignVariableOp", name,
-                    resource, value));
+                return mutex_lock_eager_fallback(mutex, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["mutex"] = mutex;
+        var _op = tf.OpDefLib._apply_op_helper("MutexLock", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { };
+            _execute.record_gradient("MutexLock", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
 
-                return null;
+    public static Tensor mutex_lock_eager_fallback(Tensor mutex, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { mutex };
+        object[] _attrs = new object[] { };
+        var _result = _execute.execute("MutexLock", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("MutexLock", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Creates a Mutex resource that can be locked by `MutexLock`.
+    /// </summary>
+    /// <param name="container">
+    /// 
+    /// If non-empty, this variable is placed in the given container.
+    /// Otherwise, a default container is used.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// If non-empty, this variable is named in the given bucket
+    /// with this shared_name. Otherwise, the node name is used instead.
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Tensor mutex_v2(string container = "", string shared_name = "", string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "MutexV2", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["container"] = container, ["shared_name"] = shared_name } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
             }
+            try
+            {
+                return mutex_v2_eager_fallback(container: container, shared_name: shared_name, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        var _op = tf.OpDefLib._apply_op_helper("MutexV2", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name") };
+            _execute.record_gradient("MutexV2", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
 
-            var _op = tf.OpDefLib._apply_op_helper("AssignVariableOp", name, new { resource, value });
+    public static Tensor mutex_v2_eager_fallback(string container, string shared_name, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { };
+        object[] _attrs = new object[] { "container", container, "shared_name", shared_name };
+        var _result = _execute.execute("MutexV2", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("MutexV2", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
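+
+    // Illustrative sketch of the critical-section pattern from the MutexLock remarks above,
+    // expressed with the wrappers in this class. `DoExclusiveUpdate` is a placeholder for the
+    // caller's own logic; in graph mode the ops would additionally need the control
+    // dependencies shown in the Python snippet.
+    //
+    //   var mutex = gen_resource_variable_ops.mutex_v2(shared_name: "my_mutex");
+    //   var lck = gen_resource_variable_ops.mutex_lock(mutex);   // blocks while another lock is alive
+    //   DoExclusiveUpdate();
+    //   gen_resource_variable_ops.consume_mutex_lock(lck);       // consumes the lock tensor, releasing the mutex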
+    /// <summary>
+    /// Reads the value of a variable.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// The tensor returned by this operation is immutable.
+    /// 
+    /// The value returned by this operation is guaranteed to be influenced by all the
+    /// writes on which this operation depends directly or indirectly, and to not be
+    /// influenced by any of the writes which depend directly or indirectly on this
+    /// operation.
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="dtype">
+    /// 
+    /// the dtype of the value.
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Tensor read_variable_op(Tensor resource, TF_DataType dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ReadVariableOp", name) { args = new object[] { resource }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return read_variable_op_eager_fallback(resource, dtype: dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["dtype"] = dtype;
+        var _op = tf.OpDefLib._apply_op_helper("ReadVariableOp", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype") };
+            _execute.record_gradient("ReadVariableOp", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
 
-            return _op;
+    public static Tensor read_variable_op_eager_fallback(Tensor resource, TF_DataType dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource };
+        object[] _attrs = new object[] { "dtype", dtype };
+        var _result = _execute.execute("ReadVariableOp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ReadVariableOp", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
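+
+    // Illustrative usage (sketch): reading the current value of a variable. The dtype passed
+    // here must match the dtype the handle was created with; `handle` is assumed to be an
+    // initialized TF_FLOAT resource tensor. The returned tensor is an immutable snapshot.
+    //
+    //   var value = gen_resource_variable_ops.read_variable_op(handle, TF_DataType.TF_FLOAT);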
+    /// <summary>
+    /// Gather slices from the variable pointed to by `resource` according to `indices`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+    /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
+    /// 
+    /// ```python
+    ///     # Scalar indices
+    ///     output[:, ..., :] = params[indices, :, ... :]
+    /// 
+    ///     # Vector indices
+    ///     output[i, :, ..., :] = params[indices[i], :, ... :]
+    /// 
+    ///     # Higher rank indices
+    ///     output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
+    /// ```
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="batch_dims"></param>
+    /// <param name="validate_indices"></param>
+    /// <param name="dtype"></param>
+    /// <returns></returns>
+    public static Tensor resource_gather(Tensor resource, Tensor indices, TF_DataType dtype, int batch_dims = 0, bool validate_indices = true, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceGather", name) { args = new object[] { resource, indices }, attrs = new Dictionary<string, object>() { ["batch_dims"] = batch_dims, ["validate_indices"] = validate_indices, ["dtype"] = dtype } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_gather_eager_fallback(resource, indices, batch_dims: batch_dims, validate_indices: validate_indices, dtype: dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["batch_dims"] = batch_dims;
+        keywords["validate_indices"] = validate_indices;
+        keywords["dtype"] = dtype;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceGather", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "batch_dims", _op._get_attr_int("batch_dims"), "validate_indices", _op._get_attr_bool("validate_indices"), "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceGather", _op.inputs, _attrs, _result);
         }
+        return _result[0];
+    }
 
-        public static Tensor var_is_initialized_op(Tensor resource, string name = null)
+    public static Tensor resource_gather_eager_fallback(Tensor resource, Tensor indices, int batch_dims, bool validate_indices, TF_DataType dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices };
+        object[] _attrs = new object[] { "batch_dims", batch_dims, "validate_indices", validate_indices, "dtype", dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceGather", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
         {
-            if (tf.Context.executing_eagerly())
+            _execute.record_gradient("ResourceGather", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
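+
+    // Illustrative usage (sketch): gathering rows 0 and 2 of a variable holding a [3, 4]
+    // float matrix. Per the remarks above, the result shape is indices.shape + params.shape[1:],
+    // i.e. [2, 4] here. `handle` is assumed to be initialized.
+    //
+    //   var rows = gen_resource_variable_ops.resource_gather(
+    //       handle, tf.constant(new[] { 0, 2 }), TF_DataType.TF_FLOAT);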
+    /// <summary>
+    /// 
+    /// </summary>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="dtype"></param>
+    /// <returns></returns>
+    public static Tensor resource_gather_nd(Tensor resource, Tensor indices, TF_DataType dtype, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
             {
-                var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "VarIsInitializedOp", name,
-                    resource));
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceGatherNd", name) { args = new object[] { resource, indices }, attrs = new Dictionary<string, object>() { ["dtype"] = dtype } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_gather_nd_eager_fallback(resource, indices, dtype: dtype, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["dtype"] = dtype;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceGatherNd", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceGatherNd", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
 
-                return results[0];
+    public static Tensor resource_gather_nd_eager_fallback(Tensor resource, Tensor indices, TF_DataType dtype, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices };
+        object[] _attrs = new object[] { "dtype", dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceGatherNd", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ResourceGatherNd", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Adds sparse updates to the variable referenced by `resource`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This operation computes
+    /// 
+    ///     # Scalar indices
+    ///     ref[indices, ...] += updates[...]
+    /// 
+    ///     # Vector indices (for each i)
+    ///     ref[indices[i], ...] += updates[i, ...]
+    /// 
+    ///     # High rank indices (for each i, ..., j)
+    ///     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
+    /// 
+    /// Duplicate entries are handled correctly: if multiple `indices` reference
+    /// the same location, their contributions add.
+    /// 
+    /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
+    /// 
+    /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+    /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
+    /// </div>
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="updates"></param>
+    /// <returns></returns>
+    public static Operation resource_scatter_add(Tensor resource, Tensor indices, Tensor updates, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterAdd", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary<string, object>() { } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
             }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_scatter_add_eager_fallback(resource, indices, updates, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["updates"] = updates;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceScatterAdd", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceScatterAdd", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-            var _op = tf.OpDefLib._apply_op_helper("VarIsInitializedOp", name, new { resource });
+    public static Operation resource_scatter_add_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates };
+        object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceScatterAdd", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ResourceScatterAdd", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
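+
+    // Illustrative usage (sketch): sparse in-place addition on a length-3 float variable.
+    // Duplicate indices accumulate, so element 1 is incremented by 1 + 3 = 4 and element 0
+    // by 2. `handle` is assumed to be initialized.
+    //
+    //   gen_resource_variable_ops.resource_scatter_add(
+    //       handle,
+    //       tf.constant(new[] { 1, 0, 1 }),
+    //       tf.constant(new float[] { 1f, 2f, 3f }));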
+    /// <summary>
+    /// Divides sparse updates into the variable referenced by `resource`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This operation computes
+    /// 
+    ///     # Scalar indices
+    ///     ref[indices, ...] /= updates[...]
+    /// 
+    ///     # Vector indices (for each i)
+    ///     ref[indices[i], ...] /= updates[i, ...]
+    /// 
+    ///     # High rank indices (for each i, ..., j)
+    ///     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
+    /// 
+    /// Duplicate entries are handled correctly: if multiple `indices` reference
+    /// the same location, their contributions multiply.
+    /// 
+    /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
+    /// 
+    /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+    /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
+    /// </div>
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="updates"></param>
+    /// <returns></returns>
+    public static Operation resource_scatter_div(Tensor resource, Tensor indices, Tensor updates, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterDiv", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary<string, object>() { } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_scatter_div_eager_fallback(resource, indices, updates, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["updates"] = updates;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceScatterDiv", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceScatterDiv", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-            return _op.output;
+    public static Operation resource_scatter_div_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates };
+        object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceScatterDiv", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ResourceScatterDiv", _inputs_flat, _attrs, _result);
         }
+        return null;
+    }
+    /// <summary>
+    /// Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This operation computes
+    /// 
+    ///     # Scalar indices
+    ///     ref[indices, ...] = max(ref[indices, ...], updates[...])
+    /// 
+    ///     # Vector indices (for each i)
+    ///     ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
+    /// 
+    ///     # High rank indices (for each i, ..., j)
+    ///     ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
+    /// 
+    /// Duplicate entries are handled correctly: if multiple `indices` reference
+    /// the same location, their contributions are combined.
+    /// 
+    /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
+    /// 
+    /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+    /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
+    /// </div>
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="updates"></param>
+    /// <returns></returns>
+    public static Operation resource_scatter_max(Tensor resource, Tensor indices, Tensor updates, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterMax", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary<string, object>() { } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_scatter_max_eager_fallback(resource, indices, updates, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["updates"] = updates;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceScatterMax", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceScatterMax", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-        /// <summary>
-        /// Creates a handle to a Variable resource.
-        /// </summary>
-        /// <param name="dtype"></param>
-        /// <param name="shape"></param>
-        /// <param name="container"></param>
-        /// <param name="shared_name"></param>
-        /// <param name="name"></param>
-        /// <returns></returns>
-        public static Tensor var_handle_op(TF_DataType dtype, Shape shape,
-            string container = "", string shared_name = "", string name = null)
+    public static Operation resource_scatter_max_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates };
+        object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceScatterMax", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
         {
-            if (tf.Context.executing_eagerly())
+            _execute.record_gradient("ResourceScatterMax", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
+    /// <summary>
+    /// Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This operation computes
+    /// 
+    ///     # Scalar indices
+    ///     ref[indices, ...] = min(ref[indices, ...], updates[...])
+    /// 
+    ///     # Vector indices (for each i)
+    ///     ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
+    /// 
+    ///     # High rank indices (for each i, ..., j)
+    ///     ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
+    /// 
+    /// Duplicate entries are handled correctly: if multiple `indices` reference
+    /// the same location, their contributions are combined.
+    /// 
+    /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
+    /// 
+    /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+    /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
+    /// </div>
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="updates"></param>
+    /// <returns></returns>
+    public static Operation resource_scatter_min(Tensor resource, Tensor indices, Tensor updates, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterMin", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary<string, object>() { } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_scatter_min_eager_fallback(resource, indices, updates, name: name, ctx: _ctx);
+            }
+            catch (Exception)
             {
-                var results = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(tf.Context, "VarHandleOp", name)
-                {
-                    attrs = ConvertToDict(new
-                    {
-                        dtype,
-                        shape = shape.dims,
-                        container,
-                        shared_name,
-                        allowed_devices = new string[0]
-                    })
-                });
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["updates"] = updates;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceScatterMin", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceScatterMin", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-                return results[0];
+    public static Operation resource_scatter_min_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates };
+        object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceScatterMin", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ResourceScatterMin", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
+    /// <summary>
+    /// Multiplies sparse updates into the variable referenced by `resource`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This operation computes
+    /// 
+    ///     # Scalar indices
+    ///     ref[indices, ...] *= updates[...]
+    /// 
+    ///     # Vector indices (for each i)
+    ///     ref[indices[i], ...] *= updates[i, ...]
+    /// 
+    ///     # High rank indices (for each i, ..., j)
+    ///     ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
+    /// 
+    /// Duplicate entries are handled correctly: if multiple `indices` reference
+    /// the same location, their contributions multiply.
+    /// 
+    /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
+    /// 
+    /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+    /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
+    /// </div>
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="updates"></param>
+    /// <returns></returns>
+    public static Operation resource_scatter_mul(Tensor resource, Tensor indices, Tensor updates, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterMul", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary<string, object>() { } });
+                return null;
             }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_scatter_mul_eager_fallback(resource, indices, updates, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["updates"] = updates;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceScatterMul", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceScatterMul", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-            var _op = tf.OpDefLib._apply_op_helper("VarHandleOp", name, new
+    public static Operation resource_scatter_mul_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates };
+        object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceScatterMul", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ResourceScatterMul", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
+    /// <summary>
+    /// Subtracts sparse updates from the variable referenced by `resource`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This operation computes
+    /// 
+    ///     # Scalar indices
+    ///     ref[indices, ...] -= updates[...]
+    /// 
+    ///     # Vector indices (for each i)
+    ///     ref[indices[i], ...] -= updates[i, ...]
+    /// 
+    ///     # High rank indices (for each i, ..., j)
+    ///     ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
+    /// 
+    /// Duplicate entries are handled correctly: if multiple `indices` reference
+    /// the same location, their contributions add.
+    /// 
+    /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
+    /// 
+    /// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
+    /// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
+    /// </div>
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="updates"></param>
+    /// <returns></returns>
+    public static Operation resource_scatter_sub(Tensor resource, Tensor indices, Tensor updates, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterSub", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary<string, object>() { } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_scatter_sub_eager_fallback(resource, indices, updates, name: name, ctx: _ctx);
+            }
+            catch (Exception)
             {
-                dtype,
-                shape,
-                container,
-                shared_name
-            });
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["updates"] = updates;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceScatterSub", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceScatterSub", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-            return _op.output;
+    public static Operation resource_scatter_sub_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates };
+        object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceScatterSub", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ResourceScatterSub", _inputs_flat, _attrs, _result);
         }
+        return null;
+    }
+    /// <summary>
+    /// Assigns sparse updates to the variable referenced by `resource`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This operation computes
+    /// 
+    ///     # Scalar indices
+    ///     ref[indices, ...] = updates[...]
+    /// 
+    ///     # Vector indices (for each i)
+    ///     ref[indices[i], ...] = updates[i, ...]
+    /// 
+    ///     # High rank indices (for each i, ..., j)
+    ///     ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
+    /// 
+    /// </remarks>
+    /// <param name="resource"></param>
+    /// <param name="indices"></param>
+    /// <param name="updates"></param>
+    /// <returns></returns>
+    public static Operation resource_scatter_update(Tensor resource, Tensor indices, Tensor updates, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "ResourceScatterUpdate", name) { args = new object[] { resource, indices, updates }, attrs = new Dictionary<string, object>() { } });
+                return null;
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return resource_scatter_update_eager_fallback(resource, indices, updates, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        keywords["indices"] = indices;
+        keywords["updates"] = updates;
+        var _op = tf.OpDefLib._apply_op_helper("ResourceScatterUpdate", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "dtype", _op._get_attr_type("dtype"), "Tindices", _op._get_attr_type("Tindices") };
+            _execute.record_gradient("ResourceScatterUpdate", _op.inputs, _attrs, _result);
+        }
+        return _op;
+    }
 
-        public static Tensor destroy_resource_op(Tensor resource, bool ignore_lookup_error = true, string name = null)
-            => tf.Context.ExecuteOp("DestroyResourceOp", name, 
-                new ExecuteOpArgs(resource).SetAttributes(new { ignore_lookup_error }));
+    public static Operation resource_scatter_update_eager_fallback(Tensor resource, Tensor indices, Tensor updates, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource, indices, updates };
+        object[] _attrs = new object[] { "dtype", updates.dtype, "Tindices", indices.dtype };
+        var _result = _execute.execute("ResourceScatterUpdate", 0, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("ResourceScatterUpdate", _inputs_flat, _attrs, _result);
+        }
+        return null;
+    }
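+
+    // Illustrative usage (sketch): overwriting selected elements instead of accumulating into
+    // them. Elements 0 and 2 of a length-3 float variable are replaced; element 1 is untouched.
+    // `handle` is assumed to be initialized.
+    //
+    //   gen_resource_variable_ops.resource_scatter_update(
+    //       handle, tf.constant(new[] { 0, 2 }), tf.constant(new float[] { -1f, -2f }));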
+    /// <summary>
+    /// Creates a handle to a Variable resource.
+    /// </summary>
+    /// <param name="container">
+    /// 
+    /// the container this variable is placed in.
+    /// 
+    /// </param>
+    /// <param name="shared_name">
+    /// 
+    /// the name by which this variable is referred to.
+    /// 
+    /// </param>
+    /// <param name="dtype">
+    /// 
+    /// the type of this variable. Must agree with the dtypes
+    /// of all ops using this variable.
+    /// 
+    /// </param>
+    /// <param name="shape">
+    /// 
+    /// The (possibly partially specified) shape of this variable.
+    /// 
+    /// </param>
+    /// <param name="allowed_devices">
+    /// 
+    /// DEPRECATED. The allowed devices containing the resource variable. Set when the
+    /// output ResourceHandle represents a per-replica/partitioned resource variable.
+    /// 
+    /// </param>
+    /// <returns></returns>
+    public static Tensor var_handle_op(TF_DataType dtype, Shape shape, string container = "", string shared_name = "", string[] allowed_devices = null, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (allowed_devices is null)
+        {
+            allowed_devices = new string[] { };
+        }
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "VarHandleOp", name) { args = new object[] { }, attrs = new Dictionary<string, object>() { ["container"] = container, ["shared_name"] = shared_name, ["dtype"] = dtype, ["shape"] = shape, ["allowed_devices"] = allowed_devices } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return var_handle_op_eager_fallback(container: container, shared_name: shared_name, dtype: dtype, shape: shape, allowed_devices: allowed_devices, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        if (container is null)
+        {
+            container = "";
+        }
+        if (shared_name is null)
+        {
+            shared_name = "";
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["container"] = container;
+        keywords["shared_name"] = shared_name;
+        keywords["dtype"] = dtype;
+        keywords["shape"] = shape;
+        keywords["allowed_devices"] = allowed_devices;
+        var _op = tf.OpDefLib._apply_op_helper("VarHandleOp", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "container", _op.get_attr("container"), "shared_name", _op.get_attr("shared_name"), "dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape"), "allowed_devices", _op.get_attr("allowed_devices") };
+            _execute.record_gradient("VarHandleOp", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
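+    // Usage sketch (illustrative only): create a named variable handle and check whether it has
+    // been initialized. `assign_variable_op` is assumed to be the companion generated wrapper.
+    //
+    //     var handle = var_handle_op(TF_DataType.TF_FLOAT, new Shape(2, 3), shared_name: "my_var");
+    //     assign_variable_op(handle, tf.zeros(new Shape(2, 3)));
+    //     var initialized = var_is_initialized_op(handle);  // scalar bool tensor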
 
-        /// <summary>
-        /// Reads the value of a variable.
-        /// </summary>
-        /// <param name="resource"></param>
-        /// <param name="dtype"></param>
-        /// <param name="name"></param>
-        /// <returns></returns>
-        public static Tensor read_variable_op(Tensor resource, TF_DataType dtype, string name = null)
-        => tf.Context.ExecuteOp("ReadVariableOp", name, new ExecuteOpArgs(resource)
-            .SetAttributes(new { dtype }));
+    public static Tensor var_handle_op_eager_fallback(string container, string shared_name, TF_DataType dtype, Shape shape, string[] allowed_devices, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { };
+        object[] _attrs = new object[] { "container", container, "shared_name", shared_name, "dtype", dtype, "shape", shape, "allowed_devices", allowed_devices };
+        var _result = _execute.execute("VarHandleOp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("VarHandleOp", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Checks whether a resource handle-based variable has been initialized.
+    /// </summary>
+    /// <param name="resource"></param>
+    /// <returns></returns>
+    public static Tensor var_is_initialized_op(Tensor resource, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "VarIsInitializedOp", name) { args = new object[] { resource }, attrs = new Dictionary<string, object>() { } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
+            {
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return var_is_initialized_op_eager_fallback(resource, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["resource"] = resource;
+        var _op = tf.OpDefLib._apply_op_helper("VarIsInitializedOp", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { };
+            _execute.record_gradient("VarIsInitializedOp", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
 
-        public static Tensor resource_gather(Tensor resource, Tensor indices, TF_DataType dtype,
-            int batch_dims = 0, bool validate_indices = true, string name = null)
+    public static Tensor var_is_initialized_op_eager_fallback(Tensor resource, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { resource };
+        object[] _attrs = new object[] { };
+        var _result = _execute.execute("VarIsInitializedOp", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
         {
-            var _op = tf.OpDefLib._apply_op_helper("ResourceGather", name, new
+            _execute.record_gradient("VarIsInitializedOp", _inputs_flat, _attrs, _result);
+        }
+        return _result[0];
+    }
+    /// <summary>
+    /// Returns the shape of the variable pointed to by `resource`.
+    /// </summary>
+    /// <remarks>
+    /// 
+    /// This operation returns a 1-D integer tensor representing the shape of `input`.
+    /// 
+    /// For example:
+    /// 
+    /// ```
+    /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+    /// shape(t) ==> [2, 2, 3]
+    /// ```
+    /// 
+    /// </remarks>
+    /// <param name="input"></param>
+    /// <param name="out_type"></param>
+    /// <returns></returns>
+    public static Tensor variable_shape(Tensor input, TF_DataType out_type = TF_DataType.TF_INT32, string? name = null)
+    {
+        var _ctx = tf.Context;
+        if (_ctx.executing_eagerly())
+        {
+            try
+            {
+                var _fast_path_result = tf.Runner.TFE_FastPathExecute(new FastPathOpExecInfo(_ctx, "VariableShape", name) { args = new object[] { input }, attrs = new Dictionary<string, object>() { ["out_type"] = out_type } });
+                return _fast_path_result[0];
+            }
+            catch (NotOkStatusException ex)
             {
-                resource,
-                indices,
-                dtype,
-                batch_dims,
-                validate_indices
-            });
+                throw ex;
+            }
+            catch (Exception)
+            {
+            }
+            try
+            {
+                return variable_shape_eager_fallback(input, out_type: out_type, name: name, ctx: _ctx);
+            }
+            catch (Exception)
+            {
+            }
+        }
+        Dictionary<string, object> keywords = new();
+        keywords["input"] = input;
+        keywords["out_type"] = out_type;
+        var _op = tf.OpDefLib._apply_op_helper("VariableShape", name, keywords);
+        var _result = _op.outputs;
+        if (_execute.must_record_gradient())
+        {
+            object[] _attrs = new object[] { "out_type", _op._get_attr_type("out_type") };
+            _execute.record_gradient("VariableShape", _op.inputs, _attrs, _result);
+        }
+        return _result[0];
+    }
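+    // Usage sketch (illustrative only): for a handle pointing at a variable of shape [2, 3],
+    // `variable_shape(handle)` yields the 1-D int32 tensor [2, 3].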
 
-            return _op.output;
+    public static Tensor variable_shape_eager_fallback(Tensor input, TF_DataType out_type, string name, Context ctx)
+    {
+        Tensor[] _inputs_flat = new Tensor[] { input };
+        object[] _attrs = new object[] { "out_type", out_type };
+        var _result = _execute.execute("VariableShape", 1, inputs: _inputs_flat, attrs: _attrs, ctx: ctx, name: name);
+        if (_execute.must_record_gradient())
+        {
+            _execute.record_gradient("VariableShape", _inputs_flat, _attrs, _result);
         }
+        return _result[0];
     }
 }
diff --git a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
index 9d52f5161..126df9e42 100644
--- a/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
+++ b/src/TensorFlowNET.Core/Operations/image_ops_impl.cs
@@ -1778,10 +1778,10 @@ internal static Tensor _bbox_overlap(Tensor boxes_a, Tensor boxes_b)
             {
                 // a_y_min: [0], a_x_min: [1], a_y_max: [2], a_x_max[3]
                 var a_xy_minmax = array_ops.split(
-                    value: boxes_a, num_split: 4, axis: 2);
+                    value: boxes_a, num_or_size_splits: 4, axis: ops.convert_to_tensor(2));
                 // b_y_min: [0], b_x_min: [1], b_y_max: [2], b_x_max[3]    
                 var b_xy_minmax = array_ops.split(
-                    value: boxes_b, num_split: 4, axis: 2);
+                    value: boxes_b, num_or_size_splits: 4, axis: ops.convert_to_tensor(2));
 
                 var i_xmin = math_ops.maximum(
                     a_xy_minmax[1], array_ops.transpose(b_xy_minmax[1], new[] { 0, 2, 1 }));
@@ -1943,7 +1943,7 @@ public static (Tensor, Tensor) non_max_suppression_padded_v2(Tensor boxes, Tenso
                 using (ops.name_scope("canonicalize_coordinates"))
                 {
                     // y_1 = [0], x_1 = [1], y_2 = [2], x_2 = [3]
-                    var yx = array_ops.split(value: boxes, num_split: 4, axis: 2);
+                    var yx = array_ops.split(value: boxes, num_or_size_splits: 4, axis: ops.convert_to_tensor(2));
                     var y_1_is_min = math_ops.reduce_all(
                         gen_math_ops.less_equal(yx[0][0, 0, 0], yx[2][0, 0, 0]));
                     var y_minmax = control_flow_ops.cond(
diff --git a/src/TensorFlowNET.Core/Operations/list_ops.cs b/src/TensorFlowNET.Core/Operations/list_ops.cs
new file mode 100644
index 000000000..c5e83ee41
--- /dev/null
+++ b/src/TensorFlowNET.Core/Operations/list_ops.cs
@@ -0,0 +1,111 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Eager;
+
+namespace Tensorflow.Operations
+{
+    internal class list_ops
+    {
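+        /// <summary>
+        /// Attaches element shape/dtype handle metadata to an eagerly created TensorList handle so
+        /// that downstream list ops can infer the element shape; graph-mode handles are left as-is.
+        /// </summary>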
+        private static void _set_handle_data(Tensor list_handle, Shape element_shape, TF_DataType element_dtype)
+        {
+            if(list_handle is EagerTensor eagerTensor)
+            {
+                var handle_data = new CppShapeInferenceResult.Types.HandleData();
+                handle_data.IsSet = true;
+                handle_data.ShapeAndType.Add(new CppShapeInferenceResult.Types.HandleShapeAndType()
+                {
+                    Shape = element_shape.as_proto(),
+                    Dtype = element_dtype.as_datatype_enum(),
+                    Type = new FullTypeDef() { TypeId = FullTypeId.TftArray }
+                });
+                list_handle.HandleData = handle_data;
+            }
+        }
+
+        private static Tensor _build_element_shape(Shape? shape)
+        {
+            if(shape is null || shape.IsNull)
+            {
+                return ops.convert_to_tensor(-1);
+            }
+            else
+            {
+                return ops.convert_to_tensor(shape);
+            }
+        }
+
+        public static Tensor tensor_list_reserve(Shape? shape, Tensor num_elements, TF_DataType element_dtype, string name = null)
+        {
+            var result = gen_list_ops.tensor_list_reserve(_build_element_shape(shape), num_elements, element_dtype, name);
+            _set_handle_data(result, shape, element_dtype);
+            return result;
+        }
+
+        public static Tensor tensor_list_from_tensor(Tensor tensor, Shape element_shape, string? name = null)
+        {
+            var result = gen_list_ops.tensor_list_from_tensor(tensor, _build_element_shape(element_shape), name);
+            _set_handle_data(result, tensor.shape, tensor.dtype);
+            return result;
+        }
+
+        public static Tensor tensor_list_get_item(Tensor input_handle, Tensor index, TF_DataType element_dtype, 
+            Shape? element_shape = null, string? name = null)
+        {
+            return gen_list_ops.tensor_list_get_item(input_handle, index, _build_element_shape(element_shape),
+                element_dtype, name);
+        }
+
+        public static Tensor tensor_list_set_item(Tensor input_handle, Tensor index, Tensor item,
+            bool resize_if_index_out_of_bounds = false, string? name = null)
+        {
+            if (resize_if_index_out_of_bounds)
+            {
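+                // Grow the list to `index + 1` first so that a write past the current length succeeds.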
+                var input_list_size = gen_list_ops.tensor_list_length(input_handle);
+                input_handle = control_flow_ops.cond(index >= input_list_size,
+                    () => gen_list_ops.tensor_list_resize(input_handle, index + 1),
+                    () => input_handle);
+            }
+            var output_handle = gen_list_ops.tensor_list_set_item(input_handle, index, item, name);
+            handle_data_util.copy_handle_data(input_handle, output_handle);
+            return output_handle;
+        }
+
+        public static Tensor tensor_list_stack(Tensor input_handle, TF_DataType element_dtype, int num_elements = -1, 
+            Shape? element_shape = null, string? name = null)
+        {
+            return gen_list_ops.tensor_list_stack(input_handle, _build_element_shape(element_shape), element_dtype, num_elements, name);
+        }
+
+        public static Tensor tensor_list_gather(Tensor input_handle, Tensor indices, TF_DataType element_dtype,
+            Shape? element_shape = null, string? name = null)
+        {
+            return gen_list_ops.tensor_list_gather(input_handle, indices, _build_element_shape(element_shape), element_dtype, name);
+        }
+
+        public static Tensor tensor_list_scatter(Tensor tensor, Tensor indices, Shape? element_shape = null, Tensor? input_handle = null, 
+            string? name = null)
+        {
+            if(input_handle is not null)
+            {
+                var output_handle = gen_list_ops.tensor_list_scatter_into_existing_list(input_handle, tensor, indices, name);
+                handle_data_util.copy_handle_data(input_handle, output_handle);
+                return output_handle;
+            }
+            else
+            {
+                var output_handle = gen_list_ops.tensor_list_scatter_v2(tensor, indices, _build_element_shape(element_shape), 
+                    constant_op.constant(-1), name);
+                _set_handle_data(output_handle, element_shape, tensor.dtype);
+                return output_handle;
+            }
+        }
+
+        public static Tensor empty_tensor_list(Shape? element_shape, TF_DataType element_dtype, int max_num_elements = -1,
+            string? name = null)
+        {
+            return gen_list_ops.empty_tensor_list(_build_element_shape(element_shape), element_dtype: element_dtype,
+                max_num_elements: ops.convert_to_tensor(max_num_elements, dtype: dtypes.int32), name: name);
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Operations/logging_ops.cs b/src/TensorFlowNET.Core/Operations/logging_ops.cs
index e38e60b5b..3303cadc3 100644
--- a/src/TensorFlowNET.Core/Operations/logging_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/logging_ops.cs
@@ -30,7 +30,7 @@ public Tensor print_v2(Tensor input, string output_stream = "stderr", string end
                     name: name);
 
             return tf.Context.ExecuteOp("PrintV2", name, new ExecuteOpArgs(formatted_string)
-               .SetAttributes(new { output_stream, end }));
+               .SetAttributes(new { output_stream, end })).SingleOrNull;
         }
     }
 }
diff --git a/src/TensorFlowNET.Core/Operations/sort_ops.cs b/src/TensorFlowNET.Core/Operations/sort_ops.cs
index 34b903230..db38a073b 100644
--- a/src/TensorFlowNET.Core/Operations/sort_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/sort_ops.cs
@@ -44,7 +44,7 @@ public static Tensor argsort(Tensor values, Axis axis = null, string direction =
                 {
                     sorted = true
                 }));
-            return indices;
+            return indices.Single;
         }
 
         public static Tensor sort(Tensor values, Axis axis, string direction = "ASCENDING", string? name = null)
diff --git a/src/TensorFlowNET.Core/Operations/tensor_array_ops.cs b/src/TensorFlowNET.Core/Operations/tensor_array_ops.cs
index 7d2da544c..6be0706c2 100644
--- a/src/TensorFlowNET.Core/Operations/tensor_array_ops.cs
+++ b/src/TensorFlowNET.Core/Operations/tensor_array_ops.cs
@@ -13,11 +13,23 @@ public class tensor_array_ops
         /// <returns></returns>
         public static TensorArray build_ta_with_new_flow(TensorArray old_ta, Tensor flow)
         {
-            var new_ta = tf.TensorArray(
-                dtype: old_ta.dtype,
-                infer_shape: old_ta.infer_shape,
+            if (!tf.Context.executing_eagerly() && old_ta is not _GraphTensorArrayV2 && control_flow_util.EnableControlFlowV2(ops.get_default_graph()))
+            {
+                throw new NotImplementedException("Attempting to build a graph-mode TF2-style "
+                                + "TensorArray from either an eager-mode "
+                                + "TensorArray or a TF1-style TensorArray.  "
+                                + "This is not currently supported.  You may be "
+                                + "attempting to capture a TensorArray "
+                                + "inside a tf.function or tf.data map function. "
+                                + "Instead, construct a new TensorArray inside "
+                                + "the function.");
+            }
+            var new_ta = TensorArray.Create(old_ta.dtype, handle: old_ta.handle, flow: flow, infer_shape: old_ta.infer_shape,
                 colocate_with_first_write_call: old_ta.colocate_with_first_write_call);
-
+            new_ta._dynamic_size = old_ta._dynamic_size;
+            new_ta._size = old_ta._size;
+            new_ta._colocate_with = old_ta._colocate_with;
+            new_ta._element_shape = old_ta._element_shape;
             return new_ta;
         }
 
diff --git a/src/TensorFlowNET.Core/Operations/while_v2.cs b/src/TensorFlowNET.Core/Operations/while_v2.cs
new file mode 100644
index 000000000..3f324f872
--- /dev/null
+++ b/src/TensorFlowNET.Core/Operations/while_v2.cs
@@ -0,0 +1,401 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Text;
+using Tensorflow.Common.Extensions;
+using Tensorflow.Common.Types;
+using Tensorflow.Eager;
+using Tensorflow.Framework;
+using Tensorflow.Framework.Models;
+using Tensorflow.Graphs;
+using static Tensorflow.Binding;
+
+namespace Tensorflow.Operations
+{
+    class _OperationWithOutputs : Operation
+    {
+        public _OperationWithOutputs(IntPtr handle, Graph g = null)
+        {
+            _handle = handle;
+            _graph = g;
+            _outputs = null;
+            g._add_op(this);
+        }
+    }
+    internal class while_v2
+    {
+        public static Tensor[] while_loop(Func<Tensors, Tensor> cond,
+            Func<Tensors, Tensors> body,
+            Tensors loop_vars,
+            int maximum_iterations = -1, 
+            int parallel_iterations = 10,
+            string name = null, 
+            bool back_prop = true, 
+            bool return_same_structure = true)
+        {
+            var orig_loop_vars = loop_vars;
+            var flat_orig_loop_vars = orig_loop_vars.Flatten().ToArray();
+            int len_orig_loop_vars = orig_loop_vars.Length;
+
+            loop_vars = _tensor_array_to_flow(loop_vars);
+            loop_vars = Nest.MapStructure(x => _convert_to_tensor_or_indexed_slices(x, TF_DataType.DtInvalid, null), loop_vars).ToTensors();
+
+            var loop_vars_signature = Nest.MapStructure(x => new TensorSpec(x.shape, x.dtype), _tensor_array_to_flow(loop_vars));
+
+            var flat_shape_invariants = Nest.Flatten(loop_vars_signature).Select(x => x.shape).ToArray();
+
+            if(string.IsNullOrEmpty(name))
+            {
+                name = "while";
+            }
+
+            return tf_with<ITensorFlowObject, Tensor[]>(ops.name_scope(name), nameScopeWhile =>
+            {
+                string scope = (nameScopeWhile as ops.NameScope).scope_name;
+                string cond_name = control_flow_util.unique_fn_name(scope, "cond");
+                string body_name = control_flow_util.unique_fn_name(scope, "body");
+
+                var maximum_iterations_loop_var = _build_maximum_iterations_loop_var(maximum_iterations);
+                var loop_counter = constant_op.constant(0, maximum_iterations == -1 ? TF_DataType.DtInvalid : maximum_iterations_loop_var.dtype,
+                    name: "loop_counter");
+                loop_vars = new Tensor[] { loop_counter, maximum_iterations_loop_var }.Concat(loop_vars).ToArray();
+
+                var func_graph_signature = new TensorSpec[] {TensorSpec.FromTensor(loop_counter),TensorSpec.FromTensor(maximum_iterations_loop_var)}
+                    .Concat(loop_vars_signature.Flatten()).ToArray();
+
+                // TODO(Rinne): possible wrong implementation here.
+                var add_control_dependencies = false;
+
+                object[] wrapped_cond(object[] inputs)
+                {
+                    Tensor loop_counter = (Tensor)inputs[0];
+                    Tensor maximum_iterations_arg = (Tensor)inputs[1];
+                    Tensor[] args = inputs.Skip(2).Select(x => (Tensor)x).ToArray();
+                    var pred = cond(_pack_sequence_as(loop_vars_signature, flat_orig_loop_vars, args));
+                    if(pred.shape.IsNull || pred.shape.ndim > 0)
+                    {
+                        pred = array_ops.squeeze(pred);
+                    }
+                    if(maximum_iterations == -1)
+                    {
+                        return new object[] { pred };
+                    }
+                    else
+                    {
+                        return new object[] { math_ops.logical_and(loop_counter < maximum_iterations_arg, pred) };
+                    }
+                }
+
+                var cond_graph = FuncGraph.func_graph_from_func(cond_name, wrapped_cond, null,
+                    null, signature: func_graph_signature, add_control_dependencies: add_control_dependencies);
+
+                bool stateful_parallelism = false;
+
+                object[] wrapped_body(object[] inputs)
+                {
+                    Tensor loop_counter = (Tensor)inputs[0];
+                    Tensor maximum_iterations_arg = (Tensor)inputs[1];
+                    Tensor[] args = inputs.Skip(2).Select(x => (Tensor)x).ToArray();
+
+                    _copy_handle_data(loop_vars.Flatten().Skip(2), args);
+
+                    foreach(var t in cond_graph.external_captures)
+                    {
+                        var graph = (FuncGraph)(ops.get_default_graph());
+                        graph.capture(t);
+                    }
+
+                    var outputs = body(_pack_sequence_as(loop_vars_signature, flat_orig_loop_vars, args));
+                    outputs = _tensor_array_to_flow(outputs);
+
+                    return new object[] { loop_counter + 1, maximum_iterations_arg }.Concat(outputs).ToArray();
+                }
+
+                var body_graph = FuncGraph.func_graph_from_func(body_name, wrapped_body, null, null, func_graph_signature,
+                    add_control_dependencies: add_control_dependencies, acd_record_initial_resource_uses: stateful_parallelism);
+
+                // TODO(Rinne): possible wrong implementation here.
+                NestList<Tensors> loop_vars_list = new(new Tensors[] { loop_vars, body_graph.external_captures.ToTensors() });
+                body_graph.Outputs.AddRange(body_graph.internal_captures);
+                
+                cond_graph.as_default();
+                int num_cond_captures = cond_graph.external_captures.Length;
+                Debug.Assert(cond_graph.external_captures.SequenceEqual(body_graph.external_captures.Take(num_cond_captures).ToArray()));
+                _duplicate_body_captures_in_cond(cond_graph, body_graph.external_captures.Skip(num_cond_captures).ToArray());
+                cond_graph.Exit();
+
+                int first_loop_var_index = 2;
+
+                int num_flattened_outputs = orig_loop_vars.Length;
+                int num_original_outputs = body_graph.Outputs.Length;
+                if (back_prop && control_flow_util.output_all_intermediates())
+                {
+                    var intermediate_tensors = _get_intermediates(body_graph);
+
+                    foreach(var intermediate_tensor in intermediate_tensors)
+                    {
+                        var tensor_list = list_ops.empty_tensor_list(intermediate_tensor.shape, intermediate_tensor.dtype, maximum_iterations);
+                        loop_vars_list.Values.Add(tensor_list);
+
+                        cond_graph.as_default();
+                        cond_graph.capture(tensor_list);
+                        cond_graph.Exit();
+
+                        body_graph.as_default();
+                        var appended_tensor_list = gen_ops.tensor_list_push_back(tensor_list, intermediate_tensor);
+                        body_graph.Outputs.Add(appended_tensor_list);
+                        body_graph.Exit();
+                    }
+                }
+
+                List<Tensor> flattened_loop_vars = new();
+                foreach(var item in loop_vars_list.Values)
+                {
+                    flattened_loop_vars.AddRange(item.Flatten());
+                }
+                // skip the check
+
+                // TODO(Rinne): deal with control dependencies
+                var output_shapes = body_graph.Outputs.Select(t => t.shape).ToArray();
+                var span = new Span<Shape>(output_shapes).Slice(first_loop_var_index, num_flattened_outputs);
+                for(int i = 0; i < span.Length; i++)
+                {
+                    span[i] = flat_shape_invariants[i];
+                }
+
+                Tensor[] outputs = _build_while_op(flattened_loop_vars.ToArray(), cond_graph, body_graph, output_shapes, parallel_iterations,
+                    (nameScopeWhile as ops.NameScope).scope_name, num_original_outputs, stateful_parallelism);
+
+                if (!ops.get_default_graph().building_function)
+                {
+                    outputs = outputs.Select(t => array_ops.identity(t)).ToArray();
+                }
+
+                var output_loop_vars = outputs.Skip(first_loop_var_index).Take(num_flattened_outputs).ToArray();
+
+                if (!back_prop)
+                {
+                    output_loop_vars = output_loop_vars.Select(t => array_ops.stop_gradient(t)).ToArray();
+                }
+                outputs = _pack_sequence_as(loop_vars_signature, flat_orig_loop_vars, output_loop_vars);
+
+                return outputs;
+            });
+        }
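+
+        // Usage sketch (illustrative only): counting to 10 with the functional while loop. The
+        // Tensor comparison/arithmetic operators and `tf.constant` are assumed to be available
+        // from the binding; user code normally reaches this through the public while_loop API.
+        //
+        //     var i = tf.constant(0);
+        //     var results = while_v2.while_loop(
+        //         cond: xs => xs[0] < 10,
+        //         body: xs => new[] { xs[0] + 1 },
+        //         loop_vars: new Tensors(i));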
+
+        private static Tensors _tensor_array_to_flow(Tensors loop_vars)
+        {
+            if(loop_vars.NestType == NestType.Node)
+            {
+                if(loop_vars.NodeValue is FakeTensorByTensorArray fake)
+                {
+                    return new Tensors(fake.TensorArray.flow);
+                }
+                else
+                {
+                    return new Tensors(loop_vars.NodeValue!);
+                }
+            }
+            else if(loop_vars.NestType == NestType.List)
+            {
+                List<INestStructure<Tensor>> list = new();
+                foreach(var item in loop_vars.ListValue!)
+                {
+                    if(item.NestType == NestType.Node)
+                    {
+                        var nested = item.AsNest();
+                        if (nested.NodeValue is FakeTensorByTensorArray fake)
+                        {
+                            list.Add(new Nest<Tensor>(fake.TensorArray.flow));
+                        }
+                        else
+                        {
+                            list.Add(new Nest<Tensor>(nested.NodeValue!));
+                        }
+                    }
+                    else
+                    {
+                        list.Add(new Nest<Tensor>(item.AsNest()));
+                    }
+                }
+                return Tensors.FromNest(new Nest<Tensor>(list));
+            }
+            else
+            {
+                throw new NotImplementedException();
+            }
+        }
+
+        private static Tensor[] _build_while_op(Tensor[] loop_vars, FuncGraph cond_graph, FuncGraph body_graph,
+            Shape[] output_shapes, int parallel_iterations, string name, int num_original_outputs, bool stateful_parallelism)
+        {
+            var cond_stateful_ops = cond_graph.get_operations().Select(x => x.op);
+            var body_stateful_ops = body_graph.get_operations().Select(x => x.op);
+
+            bool is_stateful = cond_stateful_ops.Count() > 0 || body_stateful_ops.Count() > 0;
+
+            Tensor[] _make_op(Tensor[] inputs)
+            {
+                Tensor[] outputs;
+                if (is_stateful)
+                {
+                    outputs = gen_functional_ops._while(
+                            inputs,
+                            control_flow_util.create_new_tf_function(cond_graph),
+                            control_flow_util.create_new_tf_function(body_graph),
+                            output_shapes,
+                            parallel_iterations,
+                            name
+                        );
+                }
+                else
+                {
+                    outputs = gen_functional_ops.stateless_while(
+                            inputs,
+                            control_flow_util.create_new_tf_function(cond_graph),
+                            control_flow_util.create_new_tf_function(body_graph),
+                            output_shapes,
+                            parallel_iterations,
+                            name
+                        );
+                }
+                var (while_op, tensors) = control_flow_util.get_op_and_outputs(outputs);
+                _copy_handle_data(body_graph.Outputs, tensors);
+                _set_read_only_resource_inputs_attr(while_op, new FuncGraph[]{cond_graph, body_graph});
+                while_op._set_attr("_num_original_outputs", new AttrValue() { I = num_original_outputs });
+                while_op._set_attr("_stateful_parallelism", new AttrValue() { B = stateful_parallelism });
+
+                cond_graph.outer_graph = ops.get_default_graph();
+                body_graph.outer_graph = ops.get_default_graph();
+                // TODO(Rinne): set the two graphs to while_op
+                return tensors;
+            }
+
+            return control_flow_util.run_as_function_for_tape_gradients(_make_op, loop_vars);
+        }
+
+        /// <summary>
+        /// Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies.
+        /// </summary>
+        /// <param name="op"></param>
+        /// <param name="branch_graphs"></param>
+        private static void _set_read_only_resource_inputs_attr(Operation op, FuncGraph[] branch_graphs)
+        {
+            List<int> read_only_indices = Enumerable.Range(0, op.inputs.Length).ToList();
+            foreach(var branch_graph in branch_graphs)
+            {
+                if (read_only_indices.Count == 0)
+                {
+                    break;
+                }
+                var branch_read_only_indices = auto_control_deps_utils.get_read_only_resource_input_indices_graph(branch_graph);
+                read_only_indices = read_only_indices.Intersect(branch_read_only_indices).ToList();
+            }
+            AttrValue.Types.ListValue listValue = new();
+            listValue.I.AddRange(read_only_indices.OrderBy(x => x).Select(x => (long)x));
+            op._set_attr(auto_control_deps_utils.READ_ONLY_RESOURCE_INPUTS_ATTR, new AttrValue()
+            {
+                List = listValue
+            });
+        }
+
+        private static Tensors _pack_sequence_as<T>(INestStructure<T> loop_vars_signature, Tensor[] flat_orig_loop_vars, Tensor[] loop_vars)
+        {
+            var flattened_loop_vars = zip(loop_vars, flat_orig_loop_vars).Select<(Tensor, Tensor), Tensor>(item =>
+            {
+                var (flow, y) = item;
+                if (y is FakeTensorByTensorArray ta)
+                {
+                    return new FakeTensorByTensorArray(tensor_array_ops.build_ta_with_new_flow(ta.TensorArray, flow));
+                }
+                else
+                {
+                    return flow;
+                }
+            }).ToArray();
+            return Nest.PackSequenceAs(loop_vars_signature, flattened_loop_vars).ToTensors();
+        }
+
+        private static Tensor[] _get_intermediates(FuncGraph func_graph)
+        {
+            List<Tensor> intermediates = new();
+            var reversed_captures = func_graph.captures.ToDictionary(x => x.Item2, x => x.Item1);
+
+            foreach(var op in func_graph.get_operations())
+            {
+                Debug.Assert(op is Operation);
+                var oper = (Operation)op;
+                if(oper.type == "Identity" || oper.type == "MutexLock")
+                {
+                    continue;
+                }
+                foreach(var o in  op.outputs)
+                {
+                    if(o != func_graph.Inputs[0] && o.dtype != dtypes.resource && !reversed_captures.ContainsKey(o))
+                    {
+                        intermediates.Add(o);
+                    }
+                }
+            }
+            return intermediates.ToArray();
+        }
+
+        private static void _duplicate_body_captures_in_cond(FuncGraph cond_graph, Tensor[] body_graph_captures)
+        {
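+            // The While op requires its cond and body functions to take identical inputs, so any
+            // capture that only the body graph uses is mirrored here as an unused placeholder and
+            // wired into the cond graph's capture map and input list.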
+            var types = body_graph_captures.Select(t => t.dtype).ToList();
+            var c_graph = cond_graph.c_graph;
+            var placeholders = types.Select(x => CreatePlaceholder(c_graph, _build_cond_placeholders_name_prefix(cond_graph), x)).ToList();
+
+            var placeholder_ops = placeholders.Select(ph => new _OperationWithOutputs(ph.oper, cond_graph)).ToList();
+
+            List<Tensor> tensors = new();
+            foreach(var (op, ph, dtype) in zip(placeholder_ops, placeholders, types))
+            {
+                var tensor = Tensor._create_with_tf_output(op, 0, dtype, ph);
+                op._outputs = new Tensor[] { tensor };
+                tensors.Add(tensor);
+            }
+
+            var tuples = zip(body_graph_captures, tensors).ToList();
+            var keys = body_graph_captures.Select(t => t.Id).ToList();
+            cond_graph._captures.Update(zip(keys, tuples).ToDictionary(x => x.Item1, x => x.Item2));
+            cond_graph.Inputs.AddRange(tensors);
+        }
+
+        private static TF_Output CreatePlaceholder(SafeGraphHandle graph, string name, TF_DataType dtype)
+        {
+            var desc = c_api.TF_NewOperation(graph, "Placeholder", name);
+            c_api.TF_SetAttrType(desc, "dtype", dtype);
+            var op = c_api.TF_FinishOperation(desc, tf.Status);
+            tf.Status.Check(true);
+            var output = new TF_Output();
+            output.oper = op;
+            output.index = 0;
+            return output;
+        }
+
+        private static string _build_cond_placeholders_name_prefix(FuncGraph cond_graph)
+        {
+            return cond_graph.unique_name(cond_graph.Name + "___redundant_placeholder");
+        }
+
+        private static Tensor _convert_to_tensor_or_indexed_slices(Tensor value, TF_DataType dtype, 
+            string name)
+        {
+            return ops.convert_to_tensor(value, dtype, name, false);
+        }
+
+        private static Tensor _build_maximum_iterations_loop_var(int maximum_iterations = -1)
+        {
+            return ops.convert_to_tensor(maximum_iterations, dtypes.int32, "maximum_iterations");
+        }
+
+        private static void _copy_handle_data(IEnumerable<Tensor> src_tensors, IEnumerable<Tensor> dst_tensors)
+        {
+            foreach(var (src_t, dst_t) in zip(src_tensors, dst_tensors))
+            {
+                handle_data_util.copy_handle_data(src_t, dst_t);
+            }
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/Status/Status.cs b/src/TensorFlowNET.Core/Status/Status.cs
index a890c2aef..12b6fba2b 100644
--- a/src/TensorFlowNET.Core/Status/Status.cs
+++ b/src/TensorFlowNET.Core/Status/Status.cs
@@ -17,6 +17,7 @@ limitations under the License.
 using System;
 using System.Diagnostics;
 using System.Runtime.CompilerServices;
+using Tensorflow.Exceptions;
 using Tensorflow.Util;
 using static Tensorflow.c_api;
 
@@ -88,7 +89,7 @@ public void Check(bool throwException = false)
                         case TF_Code.TF_INVALID_ARGUMENT:
                             throw new InvalidArgumentError(message);
                         default:
-                            throw new TensorflowException(message);
+                            throw new NotOkStatusException(message);
                     }
                 }
             }
diff --git a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
index 09f5b0770..02578ec18 100644
--- a/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
+++ b/src/TensorFlowNET.Core/Tensorflow.Binding.csproj
@@ -111,7 +111,12 @@ https://tensorflownet.readthedocs.io</Description>
     <PackageReference Include="MethodBoundaryAspect.Fody" Version="2.0.148" />
     <PackageReference Include="Newtonsoft.Json" Version="13.0.3" />
     <PackageReference Include="OneOf" Version="3.0.223" />
-    <PackageReference Include="Protobuf.Text" Version="0.7.0" />
+    <PackageReference Include="Protobuf.Text" Version="0.7.1" />
     <PackageReference Include="Serilog.Sinks.Console" Version="4.1.0" />
   </ItemGroup>
+
+  <ItemGroup Condition="'$(TargetFramework)' == 'netstandard2.0'">
+    <PackageReference Include="IsExternalInit" Version="1.0.3" PrivateAssets="all" />
+    <PackageReference Include="System.Memory" Version="4.5.4" PrivateAssets="all" />
+  </ItemGroup>
 </Project>
diff --git a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
index 498ffda76..e7ff9f748 100644
--- a/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
+++ b/src/TensorFlowNET.Core/Tensors/Tensor.Creation.cs
@@ -105,6 +105,13 @@ public Tensor(Operation op, int value_index, TF_DataType dtype)
             _id = ops.uid();
         }
 
+        internal static Tensor _create_with_tf_output(Operation op, int value_index, TF_DataType dtype, TF_Output tf_output)
+        {
+            Tensor ret = new Tensor(op, value_index, dtype);
+            ret._tf_output = tf_output;
+            return ret;
+        }
+
         protected unsafe void InitTensor(Shape shape, TF_DataType dtype)
         {
             _handle = TF_NewTensor(shape, dtype, null);
diff --git a/src/TensorFlowNET.Core/Tensors/TensorArray.cs b/src/TensorFlowNET.Core/Tensors/TensorArray.cs
index fb59593ce..ff74956ac 100644
--- a/src/TensorFlowNET.Core/Tensors/TensorArray.cs
+++ b/src/TensorFlowNET.Core/Tensors/TensorArray.cs
@@ -14,7 +14,9 @@ You may obtain a copy of the License at
    limitations under the License.
 ******************************************************************************/
 
+using Tensorflow.Common.Types;
 using Tensorflow.Operations;
+using static Tensorflow.Binding;
 
 namespace Tensorflow
 {
@@ -44,5 +46,27 @@ public abstract class TensorArray : ITensorOrTensorArray
 
         public abstract Tensor stack(string name = null);
         public abstract Tensor gather(Tensor indices, string name = null);
+
+        internal bool _dynamic_size;
+        internal Tensor _size;
+        internal List<Tensor> _colocate_with;
+        internal Shape _element_shape;
+
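+        /// <summary>
+        /// Factory that picks the TensorArray implementation: an eager-mode array when executing
+        /// eagerly without a variant-typed flow tensor, otherwise the graph-mode V2 array.
+        /// </summary>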
+        public static TensorArray Create(TF_DataType dtype, Tensor size = null, bool dynamic_size = false,
+            bool clear_after_read = true, string tensor_array_name = null, Tensor handle = null, Tensor flow = null,
+            bool infer_shape = true, Shape? element_shape = null,
+            bool colocate_with_first_write_call = true, string name = null)
+        {
+            if (tf.Context.executing_eagerly() && (flow is null || flow.dtype != dtypes.variant))
+            {
+                return new _EagerTensorArray(dtype, size, dynamic_size, clear_after_read, tensor_array_name, handle, flow,
+                    infer_shape, element_shape, colocate_with_first_write_call, name);
+            }
+            else
+            {
+                return new _GraphTensorArrayV2(dtype, size, dynamic_size, clear_after_read, tensor_array_name, handle, flow,
+                    infer_shape, element_shape, colocate_with_first_write_call, name);
+            }
+        }
     }
 }
diff --git a/src/TensorFlowNET.Core/Tensors/Tensors.cs b/src/TensorFlowNET.Core/Tensors/Tensors.cs
index 8d382d619..2838b000d 100644
--- a/src/TensorFlowNET.Core/Tensors/Tensors.cs
+++ b/src/TensorFlowNET.Core/Tensors/Tensors.cs
@@ -3,6 +3,9 @@
 using System.Collections;
 using System.Collections.Generic;
 using System.Linq;
+using Tensorflow.Common.Types;
+using Tensorflow.Operations;
+using Tensorflow.Common.Extensions;
 
 namespace Tensorflow
 {
@@ -13,157 +16,278 @@ namespace Tensorflow
     /// and Tensor[] from Tensors implicitily. 
     /// It works for tuple and scalar as well.
     /// </summary>
-    public class Tensors : IEnumerable<Tensor>, IDisposable
+    public sealed class Tensors : Nest<Tensor>, IDisposable
     {
-        List<Tensor> items = new List<Tensor>();
-
-        public TF_DataType dtype => items.First().dtype;
-        public Shape shape => items.First().shape;
-        public int rank => items.First().rank;
-        public Graph graph => items.First().graph;
+        public TF_DataType dtype => this.First().dtype; 
+        public Shape shape => this.First().shape;
+        public int rank => this.First().rank;
+        public Graph graph => this.First().graph;
         public bool IsList { get; set; }
-        public int Length => items.Count();
+        public int Length => this.Count();
+        /// <summary>
+        /// Return a Tensor if `Tensors` has only one tensor, otherwise throw an exception.
+        /// </summary>
+        public Tensor Single
+        {
+            get
+            {
+                if (Length != 1)
+                {
+                    throw new ValueError("Tensors with more than one tensor cannot be " +
+                        "implicitly converted to Tensor.");
+                }
+                return this.First();
+            }
+        }
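+        // For example, given `Tensors ts = new Tensors(t)`, `ts.Single` returns `t`, while a
+        // two-element `Tensors` throws; `SingleOrNull` below additionally maps an empty
+        // `Tensors` to null instead of throwing.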
 
-        public Tensor this[int index]
+        /// <summary>
+        /// Return a Tensor if `Tensors` has only one tensor, and return null when `Tensors` is empty, 
+        /// otherwise throw an exception.
+        /// </summary>
+        public Tensor? SingleOrNull
         {
-            get => items[index];
-            set => items[index] = value;
+            get
+            {
+                if (Length > 1)
+                {
+                    throw new ValueError($"Tensors with {Length} tensor cannot be " +
+                        "implicitly converted to Tensor.");
+                }
+                return this.FirstOrDefault();
+            }
         }
 
         public Tensor this[params string[] slices]
-            => items.First()[slices];
-        public Tensors(params Tensor[] tensors)
+            => this.First()[slices];
+
+        internal Tensors(Nest<Tensor> nested) : base(nested)
         {
-            items.AddRange(tensors);
+
         }
 
-        public Tensors(IEnumerable<Tensor> tensors)
+        public Tensors(params Tensor[] tensors): base(DealWithConstructorArrayInput(tensors))
         {
-            items.AddRange(tensors);
+            
         }
 
-        public Tensors(NDArray nd)
+        public Tensors(IList<Tensor> tensors) : base(tensors.Select(x => new Nest<Tensor>(x)))
         {
-            items.Add(ops.convert_to_tensor(nd));
+
         }
 
-        public IEnumerator<Tensor> GetEnumerator()
+        public Tensors(NDArray nd): base(ops.convert_to_tensor(nd))
         {
-            foreach (var tensor in items)
-                yield return tensor;
+            
         }
 
+        /// <summary>
+        /// Get the element at the shallow (top) level. For example, for ts = [1, [2, 3], 4],
+        /// the flattened indexer gives ts[1] = 2, while the shallow indexer gives ts[1] = [2, 3].
+        /// </summary>
+        /// <param name="index"></param>
+        /// <returns></returns>
+        public Tensors GetShallow(int index)
+        {
+            if(NestType == NestType.Node)
+            {
+                if(index > 0)
+                {
+                    throw new IndexOutOfRangeException();
+                }
+                return this;
+            }
+            else if(NestType == NestType.List)
+            {
+                return ListValue![index].AsNest().ToTensors();
+            }
+            else
+            {
+                throw new NotImplementedException();
+            }
+        }
+
+        private static Nest<Tensor> DealWithConstructorArrayInput(Tensor[] tensors)
+        {
+            if (tensors.Length == 0)
+            {
+                return Nest<Tensor>.Empty;
+            }
+            else if(tensors.Length == 1)
+            {
+                return new Nest<Tensor>(tensors[0]);
+            }
+            else
+            {
+                return new Nest<Tensor>(tensors.Select(x => new Nest<Tensor>(x)));
+            }
+        }
+
+        public bool IsSingle()
+        {
+            return Length == 1;
+        }
+
+        public new Tensors MergeWith(Nest<Tensor>? other)
+        {
+            return FromNest(base.MergeWith(other));
+        }
+
+        [Obsolete("This method is not encouraged to be used. It may be removed in the future. If you do want to add " +
+            "a tensor to `Tensors`, creating a new instance with your newly added tensor is a better choice.")]
         public void Add(Tensor tensor)
-            => items.Add(tensor);
+        {
+            if(NestType == NestType.Dictionary)
+            {
+                throw new ValueError("Cannot add a tensor to dictionary type of nested tensors.");
+            }
+            else if(NestType == NestType.Node)
+            {
+                NestType = NestType.List;
+                ListValue = new() { new Nest<Tensor>(NodeValue), new Nest<Tensor>(tensor) };
+                NodeValue = null;
+            }
+            else if(NestType == NestType.List)
+            {
+                ListValue!.Add(new Nest<Tensor>(tensor));
+            }
+            else //Empty
+            {
+                NestType = NestType.Node;
+                NodeValue = tensor;
+            }
+        }
 
+        [Obsolete("This method is not encouraged to be used. It may be removed in the future. If you do want to add " +
+            "some tensors to `Tensors`, creating a new instance with your newly added tensors is a better choice.")]
         public void AddRange(IEnumerable<Tensor> tensors)
-            => items.AddRange(tensors);
+        {
+            if (NestType == NestType.Dictionary)
+            {
+                throw new ValueError("Cannot add a tensor to dictionary type of nested tensors.");
+            }
+            else if (NestType == NestType.Node)
+            {
+                NestType = NestType.List;
+                ListValue = new() { new Nest<Tensor>(NodeValue) };
+                ListValue.AddRange(tensors.Select(x => new Nest<Tensor>(x)));
+                NodeValue = null;
+            }
+            else if(NestType == NestType.List)
+            {
+                ListValue!.AddRange(tensors.Select(x => new Nest<Tensor>(x)));
+            }
+            else // empty
+            {
+                NestType = NestType.List;
+                ListValue = tensors.Select(x => new Nest<Tensor>(x) as INestStructure<Tensor>).ToList();
+            }
+        }
 
+        [Obsolete("This method is not encouraged to be used. It may be removed in the future. If you do want to insert " +
+            "a tensor to `Tensors`, creating a new instance with your newly added tensor is a better choice.")]
         public void Insert(int index, Tensor tensor)
-            => items.Insert(index, tensor);
-
-        IEnumerator IEnumerable.GetEnumerator()
-            => GetEnumerator();
+        {
+            if (NestType == NestType.List)
+            {
+                ListValue.Insert(index, new Nest<Tensor>(tensor));
+            }
+            else if(NestType == NestType.Node)
+            {
+                NestType = NestType.List;
+                ListValue = new() { new Nest<Tensor>(NodeValue) };
+                ListValue.Insert(index, new Nest<Tensor>(tensor));
+                NodeValue = null;
+            }
+            else
+            {
+                throw new ValueError("Cannot add a tensor to dictionary type of nested tensors.");
+            }
+        }
 
         public string[] StringData()
         {
-            EnsureSingleTensor(this, "nnumpy");
-            return this[0].StringData();
+            return Single.StringData();
         }
 
         public string StringData(int index)
         {
-            EnsureSingleTensor(this, "nnumpy");
-            return this[0].StringData(index);
+            return Single.StringData(index);
         }
 
         public NDArray numpy()
         {
-            EnsureSingleTensor(this, "nnumpy");
-            return this[0].numpy();
+            return Single.numpy();
         }
 
+        [Obsolete]
         public T[] ToArray<T>() where T: unmanaged
         {
-            EnsureSingleTensor(this, $"ToArray<{typeof(T)}>");
-            return this[0].ToArray<T>();
+            return Single.ToArray<T>();
         }
 
         #region Explicit Conversions
         public static explicit operator bool(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to bool");
-            return (bool)tensor[0];
+            return (bool)tensor.Single;
         }
 
         public static explicit operator sbyte(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to sbyte");
-            return (sbyte)tensor[0];
+            return (sbyte)tensor.Single;
         }
 
         public static explicit operator byte(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to byte");
-            return (byte)tensor[0];
+            return (byte)tensor.Single;
         }
 
         public static explicit operator ushort(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to ushort");
-            return (ushort)tensor[0];
+            return (ushort)tensor.Single;
         }
 
         public static explicit operator short(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to short");
-            return (short)tensor[0];
+            return (short)tensor.Single;
         }
 
         public static explicit operator int(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to int");
-            return (int)tensor[0];
+            return (int)tensor.Single;
         }
 
         public static explicit operator uint(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to uint");
-            return (uint)tensor[0];
+            return (uint)tensor.Single;
         }
 
         public static explicit operator long(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to long");
-            return (long)tensor[0];
+            return (long)tensor.Single;
         }
 
         public static explicit operator ulong(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to ulong");
-            return (ulong)tensor[0];
+            return (ulong)tensor.Single;
         }
 
         public static explicit operator float(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to byte");
-            return (byte)tensor[0];
+            return (float)tensor.Single;
         }
 
         public static explicit operator double(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to double");
-            return (double)tensor[0];
+            return (double)tensor.Single;
         }
 
         public static explicit operator string(Tensors tensor)
         {
-            EnsureSingleTensor(tensor, "explicit conversion to string");
-            return (string)tensor[0];
+            return (string)tensor.Single;
         }
 
         public static explicit operator object[](Tensors tensors)
-            => tensors.items.ToArray();
+            => tensors.Flatten().ToArray();
         #endregion
 
         #region Implicit Conversions
@@ -183,56 +307,44 @@ public static implicit operator Tensors(Tensor[] tensors)
         public static implicit operator Tensors(List<Tensor> tensors)
             => new Tensors(tensors.ToArray());
 
-        public static implicit operator Tensor(Tensors tensors)
-            => tensors.FirstOrDefault();
+        public static implicit operator Tensor(Tensors? tensors)
+            => tensors?.SingleOrNull;
 
         public static implicit operator Tensor[](Tensors tensors)
-            => tensors.items.ToArray();
-
+            => tensors.Flatten().ToArray();
         #endregion
 
-        public void Deconstruct(out Tensor a, out Tensor b)
+        public static Tensors? FromNest(Nest<Tensor> nested)
         {
-            a = items[0];
-            b = items[1];
+            if(nested == Nest<Tensor>.Empty)
+            {
+                return null;
+            }
+            return new Tensors(nested);
         }
 
-        private static void EnsureSingleTensor(Tensors tensors, string methodnName)
+        public void Deconstruct(out Tensor a, out Tensors? b)
         {
-            if(tensors.Length == 0)
-            {
-                throw new ValueError($"Method `{methodnName}` of `Tensors` cannot be used when `Tensors` contains no Tensor.");
-            }
-            else if(tensors.Length > 1)
-            {
-                throw new ValueError($"Method `{methodnName}` of `Tensors` cannot be used when `Tensors` contains more than one Tensor.");
-            }
+            a = this.First();
+            b = Length == 1 ? null : new Tensors(this.Skip(1).ToArray());
         }
 
         public override string ToString()
         {
-            if(items.Count == 1)
+            if(Length == 1)
             {
-                return items[0].ToString();
+                return this.First().ToString();
             }
             else
             {
-                StringBuilder sb = new StringBuilder();
-                sb.Append($"Totally {items.Count} tensors, which are {string.Join(", ", items.Select(x => x.name))}\n[\n");
-                for(int i = 0; i < items.Count; i++)
-                {
-                    var tensor = items[i];
-                    sb.Append($"Tensor {i}({tensor.name}): {tensor.ToString()}\n");
-                }
-                sb.Append("]\n");
-                return sb.ToString();
+                return $"{Length} tensors in total: {base.ToString()}";
             }
         }
 
         public void Dispose()
         {
-            foreach (var item in items)
-                item.Dispose();
+            foreach (var tensor in this)
+                tensor.Dispose();
         }
     }
 }
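
A minimal usage sketch of the reworked Tensors surface above, assuming only members visible in this hunk (the Single-based explicit casts, Deconstruct into head/rest, FromNest, and the implicit Tensor[] conversion declared earlier in this file) plus tf.constant:

    // Deconstruct splits into the first tensor and the (possibly null) remainder.
    Tensors ts = new[] { tf.constant(1.0f), tf.constant(2.0f) };
    var (head, rest) = ts;                       // head: first tensor, rest: remaining Tensors or null
    Tensors single = new[] { tf.constant(3.0f) };
    float v = (float)single;                     // explicit casts require exactly one tensor (Single)
    Tensors? empty = Tensors.FromNest(Nest<Tensor>.Empty);  // null for an empty nest
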
diff --git a/src/TensorFlowNET.Core/Training/Trackable.cs b/src/TensorFlowNET.Core/Training/Trackable.cs
index 2b5bf2a72..3eff34875 100644
--- a/src/TensorFlowNET.Core/Training/Trackable.cs
+++ b/src/TensorFlowNET.Core/Training/Trackable.cs
@@ -179,8 +179,7 @@ protected virtual IVariableV1 _add_variable_with_custom_getter(VariableArgs args
             // handles slot variables.
             if (!args.Overwrite || new_variable is RefVariable || new_variable is Trackable)
             {
-                var temp = new_variable as Trackable;
-                var res = _track_trackable(temp, args.Name, args.Overwrite);
+                var res = _track_trackable(new_variable as Trackable, args.Name, args.Overwrite);
                 Debug.Assert(res is IVariableV1);
                 return res as IVariableV1;
             }
diff --git a/src/TensorFlowNET.Core/Util/nest.py.cs b/src/TensorFlowNET.Core/Util/nest.py.cs
index eb94f4d05..3ba3ce78b 100644
--- a/src/TensorFlowNET.Core/Util/nest.py.cs
+++ b/src/TensorFlowNET.Core/Util/nest.py.cs
@@ -36,6 +36,7 @@ namespace Tensorflow.Util
     //  (np.array([3, 4]), tf.constant([3, 4])))`
     //
 
+    [Obsolete]
     public static class nest
     {
 
diff --git a/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs b/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs
index b9a7022a2..a54283bd4 100644
--- a/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs
+++ b/src/TensorFlowNET.Core/Variables/BaseResourceVariable.cs
@@ -170,11 +170,28 @@ public IVariableV1 assign_lazy_load(Tensor value, string name = null)
         public Tensor value()
             => GraphElement ?? _read_variable_op();
 
-        protected Tensor _read_variable_op()
+        protected Tensor _read_variable_op(bool no_copy = false)
         {
             variable_accessed(this);
-            var result = gen_resource_variable_ops.read_variable_op(handle, _dtype);
-            resource_variable_ops._maybe_set_handle_data(_dtype, handle, result);
+
+            Tensor read_and_set_handle(bool no_copy)
+            {
+                if (no_copy)
+                {
+                    gen_resource_variable_ops.disable_copy_on_read(handle);
+                }
+                var result = gen_resource_variable_ops.read_variable_op(handle, _dtype);
+                resource_variable_ops._maybe_set_handle_data(_dtype, handle, result);
+                return result;
+            }
+
+            // TODO(Rinne): deal with caching device.
+            var result = read_and_set_handle(no_copy);
+            if (!tf.Context.executing_eagerly())
+            {
+                tf.Runner.TFE_TapeSetRecordOperation("ReadVariableOp", new Tensor[] { result }, new Tensor[] { handle },
+                        backward_function: (x, _) => x);
+            }
 
             // have to set shape when converting to substituent placeholder
             if (result.shape.ndim == -1)
diff --git a/src/TensorFlowNET.Core/ops.cs b/src/TensorFlowNET.Core/ops.cs
index 6d1385ca4..fb9bccf31 100644
--- a/src/TensorFlowNET.Core/ops.cs
+++ b/src/TensorFlowNET.Core/ops.cs
@@ -576,7 +576,7 @@ public static bool inside_function()
         public static HandleData get_resource_handle_data(Tensor graph_op)
         {
             var handle_data = c_api.TFC_GetHandleShapeAndType(graph_op.graph.c_graph, graph_op._as_tf_output());
-            return HandleData.Parser.ParseFrom(tf.compat.as_bytes(c_api.StringPiece(handle_data)));
+            return HandleData.Parser.ParseFrom(c_api.ByteStringPiece(handle_data));
         }
 
         public static void dismantle_graph(Graph graph)
diff --git a/src/TensorFlowNET.Keras/BackendImpl.cs b/src/TensorFlowNET.Keras/BackendImpl.cs
index 80403ad6a..8dbcf90d5 100644
--- a/src/TensorFlowNET.Keras/BackendImpl.cs
+++ b/src/TensorFlowNET.Keras/BackendImpl.cs
@@ -20,8 +20,12 @@ limitations under the License.
 using System.Collections.Generic;
 using Tensorflow.Functions;
 using Tensorflow.Graphs;
+using Tensorflow.Common.Extensions;
 using static Tensorflow.Binding;
 using static Tensorflow.Graphs.SubGraphUtility;
+using Tensorflow.Util;
+using Tensorflow.Common.Types;
+using System.Diagnostics;
 
 namespace Tensorflow.Keras
 {
@@ -450,5 +454,526 @@ public Tensor conv2d_transpose(Tensor x,
 
             return x;
         }
+
+        public (Tensors, Tensors, Tensors) rnn(
+           Func<Tensors, Tensors, (Tensors, Tensors)> step_function, // args: inputs, states; returns: output, new_states
+           Tensors inputs, // inputs is a tuple of tensors (one per input sequence)
+           Tensors initial_states,
+           bool go_backwards = false,
+           Tensor? mask = null,
+           Tensors? constants = null,
+           bool unroll = false,
+            Tensors? input_length = null, // An integer or a 1-D Tensor, depending on whether the time dimension is fixed-length or not
+           bool time_major = false,
+           bool zero_output_for_mask = false,
+           bool return_all_outputs = true)
+        {
+
+            Tensor swap_batch_timestep(Tensor input_t)
+            {
+                var axes = Enumerable.Range(0, input_t.rank).ToArray();
+                axes[0] = 1;
+                axes[1] = 0;
+                return tf.transpose(input_t, axes);
+            }
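+            // e.g. a batch-major input of shape (batch, time, feature) becomes
+            // (time, batch, feature); axes beyond the first two are left untouched.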
+
+            if (!time_major)
+            {
+                inputs = Nest.MapStructure(swap_batch_timestep, inputs).ToTensors();
+            }
+
+            var flattened_inputs = Nest.Flatten(inputs).ToList();
+            var first_flattened_input = flattened_inputs[0];
+            var time_steps = first_flattened_input.shape[0];
+            var batch = first_flattened_input.shape[1];
+            var time_steps_t = tf.shape(first_flattened_input)[0];
+
+            foreach (var input_ in flattened_inputs)
+            {
+                input_.shape.with_rank_at_least(3);
+            }
+
+            if (mask != null)
+            {
+                if (mask.dtype != TF_DataType.TF_BOOL)
+                {
+                    mask = tf.cast(mask, TF_DataType.TF_BOOL);
+                }
+
+                if (mask.rank == 2)
+                {
+                    mask = tf.expand_dims(mask, -1);
+                }
+
+                if (!time_major)
+                {
+                    mask = swap_batch_timestep(mask);
+                }
+
+            }
+            
+            // tf.where needs its condition tensor to be the same shape as its two
+            // result tensors, but in our case the condition (mask) tensor is
+            // (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
+            // So we need to broadcast the mask to match the shape of inputs.
+            // That's what the tile call does, it just repeats the mask along its
+            // second dimension n times.
+
+            Tensors _expand_mask(Tensors mask_t, Tensors input_t, int fixed_dim = 1)
+            {
+                if (!mask_t.IsSingle())
+                {
+                    throw new ValueError($"mask_t is expected to be a single tensor, but got {mask_t}");
+                }
+
+                if (!input_t.IsSingle())
+                {
+                    throw new ValueError($"input_t is expected to be a single tensor, but got {input_t}");
+                }
+
+                var rank_diff = input_t.rank - mask_t.rank;
+                for (int i = 0; i < rank_diff; i++)
+                {
+                    mask_t = tf.expand_dims(mask_t, -1);
+                }
+                var multiples = Enumerable.Repeat(1, fixed_dim).ToArray().concat(input_t.shape.as_int_list().Skip(fixed_dim).ToArray());
+                return tf.tile(mask_t, multiples);
+            }
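+            // e.g. with fixed_dim = 1, a mask_t of shape (batch, 1) and an input_t of shape
+            // (batch, features) yield multiples = [1, features], so the mask is tiled to
+            // (batch, features) before being fed to tf.where.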
+
+            Tensors outputs = new Tensors();
+            Tensors output_time_zero = new Tensors();
+            Tensors last_output = new Tensors();
+            Tensors new_states = new Tensors();
+            if (unroll)
+            {
+                if (time_steps == 0)
+                {
+                    throw new ValueError("Unrolling requires a fixed number of timesteps.");
+                }
+
+                // Process the input tensors. The input tensors need to be split on the
+                // time_step dim and reversed if go_backwards is true. In the case of
+                // nested input, the input is flattened and then transformed
+                // individually. The result of this will be a tuple of lists; each item
+                // in the tuple is a list of tensors with shape (batch, feature).
+
+
+                // TODO(Wanglongzhi2001): step_function expects a List as its second argument, but a tuple ends up being used.
+                //var states = Tuple.Create(initial_states);
+                var states = initial_states;
+
+                var successive_states = new Tensors();
+                var successive_outputs = new Tensors();
+
+                Tensors _process_single_input_t(Tensor input_t)
+                {
+                    var unstacked_input_t = array_ops.unstack(input_t); // unstack along the time_step dim
+                    if (go_backwards)
+                    {
+                        unstacked_input_t = unstacked_input_t.Reverse().ToArray();
+                    }
+                    return unstacked_input_t;
+                }
+
+                // TODO(Wanglongzhi2001)
+                Tensors processed_input;
+                if (!inputs.IsSingle())
+                {
+                    processed_input = inputs.MapStructure(_process_single_input_t).ReduceTo<Tensors, Tensor>().ToTensors();
+                }
+                else
+                {
+                    processed_input = _process_single_input_t(inputs);
+                }
+
+                object _get_input_tensor(int time)
+                {
+                    List<Tensor> inp = new List<Tensor>();
+                    foreach (var t_ in processed_input)
+                    {
+                        inp.Add(t_[time]);
+                    }
+                    return Nest.PackSequenceAs(inputs, inp);
+                }
+
+                if (mask != null)
+                {
+                    var mask_list = tf.unstack(mask);
+                    if (go_backwards)
+                    {
+                        mask_list = mask_list.Reverse().ToArray();
+                    }
+
+                    for (int i = 0; i < time_steps; i++)
+                    {
+                        // TODO(Wanglongzhi2001),deal with _get_input_tensor
+                        var inp = _get_input_tensor(i);
+                        var mask_t = mask_list[i];
+                        // TODO
+                        var (output, newStates) = step_function((Tensors)inp, states.MergeWith(constants));
+
+                        var tiled_mask_t = _expand_mask(mask_t, output);
+
+                        Tensors prev_output;
+                        if (successive_outputs.Length == 0)
+                        {
+                            prev_output = tf.zeros_like(output);
+                        }
+                        else
+                        {
+                            prev_output = successive_outputs.Last();
+                        }
+
+                        // output could be a tensor
+                        output = tf.where(tiled_mask_t, output, prev_output);
+
+                        var flat_states = Nest.Flatten(states).ToList();
+                        var flat_new_states = Nest.Flatten(newStates).ToList();
+
+                        // Expand the mask per state so that each state is masked with a
+                        // tensor of its own shape rather than with the output-shaped mask.
+                        var tiled_mask_states = flat_states
+                            .Select(s => (Tensor)_expand_mask(mask_t, s))
+                            .ToList();
+
+                        List<Tensor> flat_final_states = new List<Tensor>();
+                        foreach (var (m, s, ps) in zip(tiled_mask_states, flat_new_states, flat_states))
+                        {
+                            flat_final_states.Add(tf.where(m, s, ps));
+                        }
+
+                        states = Nest.PackSequenceAs(states, flat_final_states).ToTensors();
+                        if (return_all_outputs)
+                        {
+                            successive_outputs = successive_outputs.MergeWith(output);
+                            successive_states = successive_states.MergeWith(states);
+                        }
+                        else
+                        {
+                            successive_outputs = new Tensors(output);
+                            successive_states = new Tensors(states);
+                        }
+
+                    }
+                    last_output = successive_outputs.Last();
+                    new_states = successive_states.Last();
+                    outputs = tf.stack(successive_outputs);
+
+                    if (zero_output_for_mask)
+                    {
+                        last_output = tf.where(_expand_mask(mask_list.Last(), last_output), last_output, tf.zeros_like(last_output));
+                        outputs = tf.where(_expand_mask(mask, outputs, fixed_dim: 2), outputs, tf.zeros_like(outputs));
+                    }
+                }
+                else // mask is null
+                    {
+                        for (int i = 0; i < time_steps; i++)
+                        {
+                            var inp = _get_input_tensor(i);
+                            var (output, newStates) = step_function((Tensors)inp, states.MergeWith(constants));
+                            states = newStates;
+
+                            if (return_all_outputs)
+                            {
+                                successive_outputs.Add(output);
+                                successive_states.Add(newStates);
+                            }
+                            else
+                            {
+                                successive_outputs = new Tensors { output };
+                                successive_states = new Tensors { newStates };
+                            }
+                        }
+                        last_output = successive_outputs.Last();
+                        new_states = successive_states.Last();
+                        outputs = tf.stack(successive_outputs);
+                }
+            }
+            else // unroll == false
+            {
+                var states = initial_states;
+                //  Create input tensor array, if the inputs is nested tensors, then it
+                //  will be flattened first, and tensor array will be created one per
+                //  flattened tensor.
+
+
+                var input_ta = new List<TensorArray>();
+                for (int i = 0; i < flattened_inputs.Count; i++)
+                {
+                    input_ta.Add(TensorArray.Create(dtype: flattened_inputs[i].dtype, size: time_steps_t));
+                }
+
+                foreach (var (ta, input_) in zip(input_ta, flattened_inputs))
+                {
+                    if (!go_backwards)
+                    {
+                        ta.unstack(input_);
+                    }
+                    else
+                    {
+                        ta.unstack(reverse(input_, 0));
+                    }
+                }
+
+
+                // Get the time(0) input and compute the output for that, the output will
+                // be used to determine the dtype of output tensor array. Don't read from
+                // input_ta due to TensorArray clear_after_read default to True.
+                var input_time_zero = Nest.PackSequenceAs(inputs, flattened_inputs.Select(x => x[0]).ToArray()).ToTensors();
+
+                // output_time_zero is used to determine the cell output shape and its
+                // dtype.  the value is discarded.
+                (output_time_zero, _) = step_function(input_time_zero, 
+                    constants is null ? initial_states : initial_states.MergeWith(constants));
+
+                Tensor output_ta_size = return_all_outputs ? time_steps_t : constant_op.constant(1);
+                var output_ta = new List<TensorArray>();
+                foreach(var output in output_time_zero.Flatten())
+                {
+                    output_ta.Add(TensorArray.Create(dtype: output.dtype, size: output_ta_size, element_shape: output.shape));
+                }
+
+                var time = tf.constant(0, dtype: TF_DataType.TF_INT32, name: "time");
+
+                Func<Tensor, Tensor>? masking_fn;
+                Func<Tensors, Tensors, Tensors, Tensors>? compute_masked_output = null;
+                if (mask != null)
+                {
+                    if (go_backwards)
+                    {
+                        mask = tf.reverse(mask, axis: new[] { 0 });
+                    }
+                    var mask_ta = TensorArray.Create(dtype: TF_DataType.TF_BOOL, size: time_steps_t);
+                    mask_ta = mask_ta.unstack(mask);
+
+                    masking_fn = (time) =>
+                    {
+                        return mask_ta.read(time);
+                    };
+
+                    compute_masked_output = (mask_t, flat_out, flat_mask) =>
+                    {
+                        var tiled_mask_t = new Tensors();
+                        foreach (var o in flat_out)
+                        {
+                            tiled_mask_t.Add(_expand_mask(mask_t, o, fixed_dim: mask_t.rank));
+                        }
+
+                        Tensors res = new Tensors();
+                        foreach (var (m, o, fm) in zip(tiled_mask_t.ToList(), flat_out.ToList(), flat_mask.ToList()))
+                        {
+                            res.Add(tf.where(m, o, fm));
+                        }
+                        return res;
+                    };
+                }
+                // TODO(Wanglongzhi2001): clarify what type input_length should be (an integer or a single tensor).
+                else if (input_length is Tensor)
+                {
+                    if (go_backwards)
+                    {
+                        var max_len = tf.reduce_max(input_length, axis: 0);
+                        var rev_input_length = tf.subtract(max_len - 1, input_length);
+
+                        masking_fn = (time) =>
+                        {
+                            return tf.less(rev_input_length, time);
+                        };
+                    }
+                    else
+                    {
+                        masking_fn = (time) =>
+                        {
+                            return tf.greater(input_length, time);
+                        };
+                    }
+
+                    compute_masked_output = (mask_t, flat_out, flat_mask) =>
+                    {
+                        var res = new List<Tensor>();
+                        foreach (var (o, zo) in zip(flat_out, flat_mask))
+                        {
+                            res.Add(tf.where(mask_t, o, zo));
+                        }
+                        return res;
+                    };
+                }
+                else
+                {
+                    masking_fn = null;
+                }
+
+                Func<Tensors, Tensor> cond = (time) => (time[0] < time_steps_t);
+                int parallel_iterations = 32;
+                Tensors final_outputs;
+                if (masking_fn != null)
+                {
+                    // Mask for the T output will be based on the output of T - 1. In the
+                    // case T = 0, a zero filled tensor will be used.
+                    var flat_zero_output = new Tensors();
+                    foreach (var o in Nest.Flatten(output_time_zero))
+                    {
+                        flat_zero_output.Add(tf.zeros_like(o));
+                    }
+
+                    var prev_output = flat_zero_output;
+                    var output_ta_t = output_ta;
+                    Tensors _step(Tensors tensors)
+                    {
+                        /*
+                         RNN step function.
+                         Args:
+                            time: Current timestep value.
+                            output_ta_t: TensorArray.
+                            prev_output: tuple of outputs from time - 1.
+                            *states: List of states.
+                         Returns:
+                            Tuple(todo): `(time + 1, output_ta_t, output) + tuple(new_states)`                          
+                         */
+
+                        Tensor time = tensors[0];
+                        TensorArray output_ta_t = (tensors[1] as FakeTensorByTensorArray).TensorArray;
+                        Tensors prev_output = tensors.GetShallow(2);
+                        Tensors states = new Tensors(tensors.Skip(2 + prev_output.Length).ToArray());
+
+                        var flat_current_input = input_ta.Select(x => x.read(time)).ToList();
+                        // maybe set shape
+                        // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type
+                        var current_input = Nest.PackSequenceAs(inputs, flat_current_input).ToTensors();
+                        var mask_t = masking_fn(time);
+                        var (output, new_states) = step_function(current_input, states.MergeWith(constants));
+                        // mask output
+                        var flat_output = Nest.Flatten(output).ToList();
+
+                        var flat_mask_output = zero_output_for_mask ? flat_zero_output : prev_output.Flatten().ToList();
+
+                        // TODO(Wanglongzhi2001),deal with compute_masked_output's third parameter's type
+                        var flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output);
+
+                        // mask states
+                        var flat_state = states.Flatten().ToList();
+                        var flat_new_state = new_states.Flatten().ToList();
+
+                        foreach (var (state, new_state) in zip(flat_state, flat_new_state))
+                        {
+                            if (new_state is Tensor)
+                            {
+                                new_state.shape = state.shape;
+                            }
+                        }
+
+                        var flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state);
+                        new_states = Nest.PackSequenceAs(new_states, flat_final_state.ToArray()).ToTensors();
+
+                        var ta_index_to_write = return_all_outputs ? time : tf.constant(0);
+                        Debug.Assert(flat_output.Count() == 1);
+                        output_ta_t = output_ta_t.write(ta_index_to_write, flat_new_output.First());
+
+                        return new Tensor[] { time + 1, new FakeTensorByTensorArray(output_ta_t) }.Concat(flat_new_output).Concat(new_states)
+                            .ToArray().ToTensors();
+
+                    }
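+                    // Loop-variable layout shared by _step and the while_loop below:
+                    // [0] current time step, [1] the output TensorArray (wrapped in
+                    // FakeTensorByTensorArray), then the flattened previous outputs,
+                    // followed by the flattened states.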
+                    var loop_vars = new Tensor[] { time + 1, new FakeTensorByTensorArray(output_ta[0]) }
+                            .Concat(flat_zero_output.Flatten()).Concat(states).ToArray().ToTensors();
+                    final_outputs = control_flow_ops.while_loop(cond: cond, body: _step, loop_vars: loop_vars, parallel_iterations: parallel_iterations);
+                    new_states = final_outputs.Skip(3).ToList();
+                }
+                else
+                {
+                    var output_ta_t = output_ta;
+                    new_states = states;
+                    Tensors _step(Tensors tensors)
+                    {
+                        Tensor time = tensors[0];
+                        TensorArray output_ta_t = (tensors[1] as FakeTensorByTensorArray).TensorArray;
+                        Tensors states = new Tensors(tensors.Skip(2).ToArray());
+                        var flat_current_input = input_ta.Select(x => x.read(time)).ToList();
+                        // maybe set shape
+                        // TODO(Wanglongzhi2001),deal with nest.pack_sequence_as's return type
+                        var current_input = Nest.PackSequenceAs(inputs, flat_current_input).ToTensors();
+                        var (output, new_states) = step_function(current_input, states.MergeWith(constants));
+                        var flat_state = new_states.Flatten().ToList();
+                        var flat_new_state = new_states.Flatten().ToList();
+                        foreach (var (state, new_state) in zip(flat_state, flat_new_state))
+                        {
+                            if (new_state is Tensor)
+                            {
+                                new_state.shape = state.shape;
+                            }
+                        }
+                        var flat_output = Nest.Flatten(output);
+                        var ta_index_to_write = return_all_outputs ? time : tf.constant(0);
+                        Debug.Assert(flat_output.Count() == 1);
+                        output_ta_t = output_ta_t.write(ta_index_to_write, flat_output.First());
+
+                        new_states = Nest.PackSequenceAs(initial_states, flat_new_state).ToTensors();
+                        return new Tensor[] { time + 1, new FakeTensorByTensorArray(output_ta_t) }.Concat(new_states).ToArray().ToTensors();
+                    }
+                    Debug.Assert(output_ta.Count == 1);
+                    var loop_vars = new Tensor[] { time + 1, new FakeTensorByTensorArray(output_ta[0]) }.Concat(states).ToArray().ToTensors();
+                    final_outputs = control_flow_ops.while_loop(cond: cond, body: _step, loop_vars: loop_vars, parallel_iterations: parallel_iterations);
+                    new_states = final_outputs.Skip(2).ToList();
+                }
+
+                output_ta = new List<TensorArray> { (final_outputs[1] as FakeTensorByTensorArray).TensorArray };
+                outputs = outputs.MergeWith(output_ta.Select(o => o.stack()).ToArray().ToTensors());
+                last_output = last_output.MergeWith(outputs.Select(o => o[-1]).ToArray().ToTensors());
+                outputs = Nest.PackSequenceAs(output_time_zero, (Tensor[])outputs).ToTensors();
+                last_output = Nest.PackSequenceAs(output_time_zero, (Tensor[])last_output).ToTensors();
+            }
+
+            Func<Tensor, Tensor> set_shape = (output_) =>
+            {
+                if (output_ is Tensor)
+                {
+                    var shape = output_.shape.as_int_list();
+                    if (return_all_outputs)
+                    {
+                        shape[0] = (int)time_steps;
+                    }
+                    else
+                    {
+                        shape[0] = 1;
+                    }
+                    shape[1] = (int)batch;
+                    output_.shape = shape;
+                }
+                return output_;
+            };
+
+            outputs = Nest.MapStructure(set_shape, outputs).ToTensors();
+            if (!time_major)
+            {
+                outputs = Nest.MapStructure(swap_batch_timestep, outputs).ToTensors();
+            }
+            return (last_output, outputs, new_states);
+
+        }
+
+        public Tensor reverse(Tensor input, int axis)
+        {
+            return reverse(input, new int[] { axis });
+        }
+
+        public Tensor reverse(Tensor input, int[] axes)
+        {
+            return tf.reverse(input, axes);
+        }
+
+        public Tensor maybe_convert_to_ragged(bool is_ragged_output, Tensor output, int nested_row_lengths, bool go_backwards = false)
+        {
+            if (!is_ragged_output)
+            {
+                return output;
+            }
+
+            throw new NotImplementedException("Not implemented currently, please submit an issue to https://github.com/SciSharp/TensorFlow.NET/issues");
+        }
     }
 }
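
A minimal sketch of driving the new keras.backend.rnn helper with an identity step function, assuming the keras.backend accessor used elsewhere in this patch and the standard tf.zeros/Shape factories (illustrative only):

    // Batch-major input of shape (batch=2, time=3, feature=4), i.e. time_major: false.
    Tensors inputs = new[] { tf.zeros(new Shape(2, 3, 4)) };
    Tensors initial_states = new[] { tf.zeros(new Shape(2, 4)) };
    // step_function: output = current input, states passed through unchanged.
    Func<Tensors, Tensors, (Tensors, Tensors)> step = (input_t, states) => (input_t, states);
    var (last_output, outputs, new_states) =
        keras.backend.rnn(step, inputs, initial_states, unroll: false);
    // outputs: (batch, time, feature); last_output: (batch, feature)
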
diff --git a/src/TensorFlowNET.Keras/Engine/Functional.cs b/src/TensorFlowNET.Keras/Engine/Functional.cs
index e768bd0bd..7347585f8 100644
--- a/src/TensorFlowNET.Keras/Engine/Functional.cs
+++ b/src/TensorFlowNET.Keras/Engine/Functional.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Linq;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Saving.SavedModel;
 using Tensorflow.Keras.Utils;
@@ -81,7 +82,7 @@ protected void _init_graph_network(Tensors inputs, Tensors outputs)
             }
             else
             {
-                _buildInputShape = new Saving.TensorShapeConfig();
+                _buildInputShape = new TensorShapeConfig();
             }
 
             if (outputs.Any(x => x.KerasHistory == null))
@@ -325,7 +326,7 @@ void BuildMapHelper(Tensor tensor,
             nodes_in_decreasing_depth.append(node);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             var tensor_dict = new Dictionary<long, Queue<Tensor>>();
             // map input values
diff --git a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs
index c04304580..d52190fd3 100644
--- a/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs
+++ b/src/TensorFlowNET.Keras/Engine/Layer.Apply.cs
@@ -1,4 +1,5 @@
 using System.Threading;
+using Tensorflow.Common.Types;
 using static Tensorflow.Binding;
 
 namespace Tensorflow.Keras.Engine
@@ -8,11 +9,11 @@ public partial class Layer
         /// <summary>
         /// Wraps `call`, applying pre- and post-processing steps.
         /// </summary>
-        /// <param name="input"></param>
+        /// <param name="inputs"></param>
         /// <param name="state"></param>
         /// <param name="training"></param>
         /// <returns></returns>
-        public Tensors Apply(Tensors inputs, Tensor state = null, bool training = false)
+        public virtual Tensors Apply(Tensors inputs, Tensors states = null, bool training = false, IOptionalArgs? optional_args = null)
         {
             if (callContext.Value == null)
                 callContext.Value = new CallContext();
@@ -30,13 +31,15 @@ public Tensors Apply(Tensors inputs, Tensor state = null, bool training = false)
             if (!built)
                 MaybeBuild(inputs);
 
-            var outputs = Call(inputs, state: state, training: training);
+            var outputs = Call(inputs, state: states, training: training);
 
             // memory leak
             // _set_connectivity_metadata_(inputs, outputs);
             _handle_activity_regularization(inputs, outputs);
             _set_mask_metadata(inputs, outputs, null);
 
+            // TODO(Rinne): set save spec if null
+
             scope.__exit__();
 
             return outputs;
diff --git a/src/TensorFlowNET.Keras/Engine/Layer.cs b/src/TensorFlowNET.Keras/Engine/Layer.cs
index 5942efd92..2f758a850 100644
--- a/src/TensorFlowNET.Keras/Engine/Layer.cs
+++ b/src/TensorFlowNET.Keras/Engine/Layer.cs
@@ -32,7 +32,7 @@ limitations under the License.
 using static Tensorflow.Binding;
 using Tensorflow.Framework;
 using Tensorflow.Sessions;
-
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Engine
 {
@@ -332,7 +332,7 @@ private Tensor compute_mask(Tensor inputs, Tensor mask = null)
         /// <param name="state"></param>
         /// <param name="training"></param>
         /// <returns></returns>
-        protected virtual Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected virtual Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if(ReplacedCall is not null)
             {
diff --git a/src/TensorFlowNET.Keras/Engine/Model.Build.cs b/src/TensorFlowNET.Keras/Engine/Model.Build.cs
index 69afdef90..233363832 100644
--- a/src/TensorFlowNET.Keras/Engine/Model.Build.cs
+++ b/src/TensorFlowNET.Keras/Engine/Model.Build.cs
@@ -23,7 +23,7 @@ public override void build(KerasShapesWrapper input_shape)
                 var graph = tf.executing_eagerly() ? new FuncGraph("build_graph") : keras.backend.get_graph();
                 graph.as_default();
                 var shapes = input_shape.ToShapeArray();
-                var x = new Tensors(shapes.Select(x => base_layer_utils.generate_placeholders_from_shape(x)));
+                var x = new Tensors(shapes.Select(x => base_layer_utils.generate_placeholders_from_shape(x)).ToArray());
                 try
                 {
                     Call(x, training: false);
diff --git a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs
index 912f5e06d..eaa9eb23c 100644
--- a/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs
+++ b/src/TensorFlowNET.Keras/Engine/Model.Evaluate.cs
@@ -72,7 +72,7 @@ public Dictionary<string, float> evaluate(IEnumerable<Tensor> x, Tensor y, int v
         {
             var data_handler = new DataHandler(new DataHandlerArgs
             {
-                X = new Tensors(x),
+                X = new Tensors(x.ToArray()),
                 Y = y,
                 Model = this,
                 StepsPerExecution = _steps_per_execution
@@ -168,7 +168,8 @@ Dictionary<string, float> test_function(DataHandler data_handler, Tensor[] data)
         Dictionary<string, float> test_step_multi_inputs_function(DataHandler data_handler, Tensor[] data)
         {
             var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount;
-            var outputs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size)));
+            var outputs = train_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray()));
+            tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1));
             return outputs;
         }
     }
diff --git a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs
index 17ecde984..68dc5976c 100644
--- a/src/TensorFlowNET.Keras/Engine/Model.Fit.cs
+++ b/src/TensorFlowNET.Keras/Engine/Model.Fit.cs
@@ -110,7 +110,7 @@ public ICallback fit(IEnumerable<NDArray> x, NDArray y,
 
             var data_handler = new DataHandler(new DataHandlerArgs
             {
-                X = new Tensors(train_x),
+                X = new Tensors(train_x.ToArray()),
                 Y = train_y,
                 BatchSize = batch_size,
                 InitialEpoch = initial_epoch,
diff --git a/src/TensorFlowNET.Keras/Engine/Model.Train.cs b/src/TensorFlowNET.Keras/Engine/Model.Train.cs
index 905ea453a..48c16e181 100644
--- a/src/TensorFlowNET.Keras/Engine/Model.Train.cs
+++ b/src/TensorFlowNET.Keras/Engine/Model.Train.cs
@@ -21,7 +21,7 @@ Dictionary<string, float> train_step_multi_inputs_function(DataHandler data_hand
         {
             var data = iterator.next();
             var x_size = data_handler.DataAdapter.GetDataset().FirstInputTensorCount;
-            var outputs = train_step(data_handler, new Tensors(data.Take(x_size)), new Tensors(data.Skip(x_size)));
+            var outputs = train_step(data_handler, new Tensors(data.Take(x_size).ToArray()), new Tensors(data.Skip(x_size).ToArray()));
             tf_with(ops.control_dependencies(new object[0]), ctl => _train_counter.assign_add(1));
             return outputs;
         }
diff --git a/src/TensorFlowNET.Keras/Engine/Model.cs b/src/TensorFlowNET.Keras/Engine/Model.cs
index 83702b23a..7b35d5477 100644
--- a/src/TensorFlowNET.Keras/Engine/Model.cs
+++ b/src/TensorFlowNET.Keras/Engine/Model.cs
@@ -1,8 +1,8 @@
 using System.Diagnostics;
+using Tensorflow.Common.Types;
 using Tensorflow.Framework.Models;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Losses;
-using Tensorflow.Keras.Saving;
 using Tensorflow.Keras.Saving.SavedModel;
 using Tensorflow.Keras.Utils;
 using Tensorflow.Train;
diff --git a/src/TensorFlowNET.Keras/Engine/Sequential.cs b/src/TensorFlowNET.Keras/Engine/Sequential.cs
index 278747515..6a468ad27 100644
--- a/src/TensorFlowNET.Keras/Engine/Sequential.cs
+++ b/src/TensorFlowNET.Keras/Engine/Sequential.cs
@@ -21,6 +21,7 @@ limitations under the License.
 using Tensorflow.Keras.Layers;
 using Tensorflow.Keras.Utils;
 using static Tensorflow.KerasApi;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Engine
 {
@@ -143,7 +144,7 @@ public void add(ILayer layer)
             }
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (!_has_explicit_input_shape)
             {
diff --git a/src/TensorFlowNET.Keras/IsExternalInit.cs b/src/TensorFlowNET.Keras/IsExternalInit.cs
new file mode 100644
index 000000000..11f062fa8
--- /dev/null
+++ b/src/TensorFlowNET.Keras/IsExternalInit.cs
@@ -0,0 +1,4 @@
+namespace System.Runtime.CompilerServices
+{
+    internal static class IsExternalInit { }
+}
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/ELU.cs b/src/TensorFlowNET.Keras/Layers/Activation/ELU.cs
index 739c0d56f..23f36c862 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/ELU.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/ELU.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
@@ -29,7 +30,7 @@ public override void build(KerasShapesWrapper input_shape)
             base.build(input_shape);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor output = inputs;
             output = tf.where(output > 0f, output,
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs b/src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs
index 17636302f..81fefb314 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/Exponential.cs
@@ -4,7 +4,7 @@
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
-using static Tensorflow.Binding;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers {
     public class Exponential : Layer
@@ -17,7 +17,7 @@ public override void build(KerasShapesWrapper input_shape)
         {
             base.build(input_shape);
         }
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor output = inputs;
             return tf.exp(output);
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs b/src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs
index b498d1b94..e0f91380b 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/HardSigmoid.cs
@@ -3,6 +3,7 @@
 using System.Text;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
+using Tensorflow.Common.Types;
 using static Tensorflow.Binding;
 
 namespace Tensorflow.Keras.Layers {
@@ -10,7 +11,7 @@ public class HardSigmoid : Layer {
             public HardSigmoid ( LayerArgs args ) : base(args) {
                   // hard sigmoid has no arguments
             }
-            protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
+            protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null ) {
                   Tensor x = inputs;
                   return tf.clip_by_value(
                       tf.add(tf.multiply(x, 0.2f), 0.5f), 0f, 1f);
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs b/src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs
index 1fbbf4eaf..cfbd0186d 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/LeakyReLu.cs
@@ -3,6 +3,7 @@
 using System.Text;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
+using Tensorflow.Common.Types;
 using static Tensorflow.Binding;
 
 namespace Tensorflow.Keras.Layers
@@ -19,7 +20,7 @@ public LeakyReLu(LeakyReLuArgs args) : base(args)
             this.args = args;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             return tf.nn.leaky_relu(inputs, alpha: alpha);
         }
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/SELU.cs b/src/TensorFlowNET.Keras/Layers/Activation/SELU.cs
index 53101fbb4..2e943d5f7 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/SELU.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/SELU.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
@@ -22,7 +23,7 @@ public override void build(KerasShapesWrapper input_shape) {
                 }
                 base.build(input_shape);
             }
-            protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
+            protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
                   Tensor output = inputs;
                   return tf.where(output > 0f,
                         tf.multiply(scale, output),
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs b/src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs
index 3ffae27f6..d018128d5 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/Softmax.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using static Tensorflow.Binding;
@@ -11,8 +12,8 @@ public class Softmax : Layer {
             public Softmax ( SoftmaxArgs args ) : base(args) {
                   axis = args.axis;
             }
-            protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
-                  Tensor x = inputs.Length == 2 ? inputs + ((1.0 - tf.cast(inputs[1], inputs.dtype)) * 1e-9)
+            protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
+                  Tensor x = inputs.Length == 2 ? inputs[0] + ((1.0 - tf.cast(inputs[1], inputs.dtype)) * 1e-9)
                                                 : inputs;
                   Tensor e = tf.exp(tf.sub(x, tf.reduce_max(x, axis: this.axis, keepdims: true)));
                   Tensor s = tf.reduce_sum(e, axis: this.axis, keepdims: true);
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs b/src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs
index e82b01982..1e6c59b42 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/Softplus.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using static Tensorflow.Binding;
@@ -10,7 +11,7 @@ public class Softplus : Layer {
             public Softplus ( LayerArgs args ) : base(args) {
                   // Softplus has no arguments
             }
-            protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
+            protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
                   Tensor x = inputs;
                   return tf.log(
                         tf.add(tf.exp(x), 1f));
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs b/src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs
index 59329fd44..5ad33e99d 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/Softsign.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using static Tensorflow.Binding;
@@ -10,7 +11,7 @@ public class Softsign : Layer {
             public Softsign ( LayerArgs args ) : base(args) {
                   // Softsign has no arguments
             }
-            protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
+            protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
                   Tensor x = inputs;
                   // x / (abs(x) + 1)
                   return tf.div(x, tf.add(1f, tf.abs(x)));
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Swish.cs b/src/TensorFlowNET.Keras/Layers/Activation/Swish.cs
index 1dcb92b31..ed0d105a6 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/Swish.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/Swish.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Text;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using static Tensorflow.Binding;
@@ -10,7 +11,7 @@ public class Swish : Layer {
             public Swish ( LayerArgs args ) : base(args) {
                   // Swish has no arguments
             }
-            protected override Tensors Call ( Tensors inputs, Tensor state = null, bool? training = null ) {
+            protected override Tensors Call ( Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null) {
                   Tensor x = inputs;
 
                   // x / (1 + exp(-x))
diff --git a/src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs b/src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs
index 99b803942..7e90cf9d8 100644
--- a/src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs
+++ b/src/TensorFlowNET.Keras/Layers/Activation/Tanh.cs
@@ -3,6 +3,7 @@
 using System.Text;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
+using Tensorflow.Common.Types;
 using static Tensorflow.Binding;
 
 namespace Tensorflow.Keras.Layers
@@ -13,7 +14,7 @@ public Tanh(LayerArgs args) : base(args)
         {
             // Tanh has no arguments
         }
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor x = inputs;
 
diff --git a/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs b/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs
index 1348e19cf..19b292727 100644
--- a/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs
+++ b/src/TensorFlowNET.Keras/Layers/Attention/BaseDenseAttention.cs
@@ -6,6 +6,7 @@
 using System.Collections.Generic;
 using System.Linq;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 /// <summary>
 /// Base class for attention layers that can be used in sequence DNN/CNN models.
@@ -114,7 +115,7 @@ public virtual Tensor _calculate_scores(Tensor query, Tensor key) =>
             return (tf.linalg.einsum("bij,bjk->bik", (weights, value)), weights);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensors _inp;
             Tensors _mask = null;
diff --git a/src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs b/src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs
index 701724d5b..75dd4a41a 100644
--- a/src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs
+++ b/src/TensorFlowNET.Keras/Layers/Attention/MultiHeadAttention.cs
@@ -6,6 +6,7 @@
 using static Tensorflow.KerasApi;
 using System;
 using System.Linq;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -252,7 +253,7 @@ public Tensors _compute_attention(
             return (attention_output, attention_scores);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensors _inp;
             Tensor _mask = null;
@@ -349,7 +350,7 @@ protected Tensors call(Tensors inputs,
             //}
 
             if (return_attention_scores)
-                return (attention_output, attention_scores);
+                return (attention_output, attention_scores.Single);
             return attention_output;
         }
     }
diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs b/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs
index bbd49acd2..94ad79141 100644
--- a/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs
+++ b/src/TensorFlowNET.Keras/Layers/Convolution/Conv2DTranspose.cs
@@ -20,6 +20,7 @@ limitations under the License.
 using Tensorflow.Keras.Utils;
 using static Tensorflow.KerasApi;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -83,7 +84,7 @@ public override void build(KerasShapesWrapper input_shape)
             _buildInputShape = input_shape;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             var inputs_shape = array_ops.shape(inputs);
             var batch_size = inputs_shape[0];
diff --git a/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs b/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
index c575362c0..d8e00d520 100644
--- a/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
+++ b/src/TensorFlowNET.Keras/Layers/Convolution/Convolutional.cs
@@ -17,6 +17,7 @@ limitations under the License.
 using System;
 using System.Collections.Generic;
 using System.Linq;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
@@ -103,7 +104,7 @@ public override void build(KerasShapesWrapper input_shape)
             _buildInputShape = input_shape;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = false)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = false, IOptionalArgs? optional_args = null)
         {
             var outputs = _convolution_op.Apply(inputs, kernel.AsTensor());
             if (use_bias)
diff --git a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs
index aa6617ddc..db5d626ed 100644
--- a/src/TensorFlowNET.Keras/Layers/Core/Dense.cs
+++ b/src/TensorFlowNET.Keras/Layers/Core/Dense.cs
@@ -18,6 +18,7 @@ limitations under the License.
 using System.Collections.Generic;
 using System.Diagnostics;
 using System.Linq;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
@@ -69,7 +70,7 @@ public override void build(KerasShapesWrapper input_shape)
             built = true;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor outputs = null;
             var rank = inputs.rank;
diff --git a/src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs b/src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs
index fb604f77e..0cbd50846 100644
--- a/src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs
+++ b/src/TensorFlowNET.Keras/Layers/Core/EinsumDense.cs
@@ -7,6 +7,7 @@
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.ArgsDefinition.Core;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -189,7 +190,7 @@ public override Shape ComputeOutputShape(Shape input_shape)
         //    return new dict(base_config.items().ToList() + config.items().ToList());
         //}
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             var ret = tf.linalg.einsum(this.equation, (inputs, this.kernel.AsTensor()));
             if (this.bias != null)
diff --git a/src/TensorFlowNET.Keras/Layers/Core/Embedding.cs b/src/TensorFlowNET.Keras/Layers/Core/Embedding.cs
index 9487a7d00..87b42bb7b 100644
--- a/src/TensorFlowNET.Keras/Layers/Core/Embedding.cs
+++ b/src/TensorFlowNET.Keras/Layers/Core/Embedding.cs
@@ -15,6 +15,7 @@ limitations under the License.
 ******************************************************************************/
 
 using System.Linq;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
@@ -66,7 +67,7 @@ public override void build(KerasShapesWrapper input_shape)
             _buildInputShape = input_shape;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             var dtype = inputs.dtype;
             if (dtype != tf.int32 && dtype != tf.int64)
diff --git a/src/TensorFlowNET.Keras/Layers/LayersApi.cs b/src/TensorFlowNET.Keras/Layers/LayersApi.cs
index 3b095bc2a..0bdcbc841 100644
--- a/src/TensorFlowNET.Keras/Layers/LayersApi.cs
+++ b/src/TensorFlowNET.Keras/Layers/LayersApi.cs
@@ -685,6 +685,32 @@ public ILayer LeakyReLU(float alpha = 0.3f)
                 Alpha = alpha
             });
 
+
+        public IRnnCell SimpleRNNCell(
+            int units,
+            string activation = "tanh",
+            bool use_bias = true,
+            string kernel_initializer = "glorot_uniform",
+            string recurrent_initializer = "orthogonal",
+            string bias_initializer = "zeros",
+            float dropout = 0f,
+            float recurrent_dropout = 0f)
+            => new SimpleRNNCell(new SimpleRNNCellArgs
+            {
+                Units = units,
+                Activation = keras.activations.GetActivationFromName(activation),
+                UseBias = use_bias,
+                KernelInitializer = GetInitializerByName(kernel_initializer),
+                RecurrentInitializer = GetInitializerByName(recurrent_initializer),
+                BiasInitializer = GetInitializerByName(bias_initializer),
+                Dropout = dropout,
+                RecurrentDropout = recurrent_dropout
+            });
+
+        public IRnnCell StackedRNNCells(
+            IEnumerable<IRnnCell> cells)
+            => new StackedRNNCells(cells.ToList(), new StackedRNNCellsArgs());
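+
+        // Illustrative usage sketch (an assumption for documentation purposes, not taken from
+        // this change's tests): two SimpleRNNCell instances stacked and wrapped in an RNN layer.
+        //   var cells = new IRnnCell[] { keras.layers.SimpleRNNCell(4), keras.layers.SimpleRNNCell(4) };
+        //   var rnn = keras.layers.RNN(keras.layers.StackedRNNCells(cells), return_sequences: true);
+        //   var outputs = rnn.Apply(tf.ones((2, 3, 5)));   // (batch, timesteps, features) -> (2, 3, 4)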
+
         /// <summary>
         /// 
         /// </summary>
@@ -709,6 +735,80 @@ public ILayer SimpleRNN(int units,
                     ReturnState = return_state
                 });
 
+        /// <summary>
+        /// Creates a recurrent layer from a single <see cref="IRnnCell"/>, applying the cell over the time dimension of the input.
+        /// </summary>
+        /// <param name="cell"></param>
+        /// <param name="return_sequences"></param>
+        /// <param name="return_state"></param>
+        /// <param name="go_backwards"></param>
+        /// <param name="stateful"></param>
+        /// <param name="unroll"></param>
+        /// <param name="time_major"></param>
+        /// <returns></returns>
+        public ILayer RNN(
+            IRnnCell cell,
+            bool return_sequences = false,
+            bool return_state = false,
+            bool go_backwards = false,
+            bool stateful = false,
+            bool unroll = false,
+            bool time_major = false)
+            => new RNN(cell, new RNNArgs
+            {
+                ReturnSequences = return_sequences,
+                ReturnState = return_state,
+                GoBackwards = go_backwards,
+                Stateful = stateful,
+                Unroll = unroll,
+                TimeMajor = time_major
+            });
+
+        public ILayer RNN(
+            IEnumerable<IRnnCell> cell,
+            bool return_sequences = false,
+            bool return_state = false,
+            bool go_backwards = false,
+            bool stateful = false,
+            bool unroll = false,
+            bool time_major = false)
+            => new RNN(cell, new RNNArgs
+            {
+                ReturnSequences = return_sequences,
+                ReturnState = return_state,
+                GoBackwards = go_backwards,
+                Stateful = stateful,
+                Unroll = unroll,
+                TimeMajor = time_major
+            });
+
+
+        public IRnnCell LSTMCell(int units,
+            string activation = "tanh",
+            string recurrent_activation = "sigmoid",
+            bool use_bias = true,
+            string kernel_initializer = "glorot_uniform",
+            string recurrent_initializer = "orthogonal", // TODO(Wanglongzhi2001),glorot_uniform has not been developed.
+            string bias_initializer = "zeros",
+            bool unit_forget_bias = true,
+            float dropout = 0f,
+            float recurrent_dropout = 0f,
+            int implementation = 2)
+            => new LSTMCell(new LSTMCellArgs
+            {
+                Units = units,
+                Activation = keras.activations.GetActivationFromName(activation),
+                RecurrentActivation = keras.activations.GetActivationFromName(recurrent_activation),
+                UseBias = use_bias,
+                KernelInitializer = GetInitializerByName(kernel_initializer),
+                RecurrentInitializer = GetInitializerByName(recurrent_initializer),
+                BiasInitializer = GetInitializerByName(bias_initializer),
+                UnitForgetBias = unit_forget_bias,
+                Dropout = dropout,
+                RecurrentDropout = recurrent_dropout,
+                Implementation = implementation
+            });
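+
+        // Illustrative sketch (assumed usage, not part of this change): the cell created here
+        // can be driven by the RNN wrapper defined above.
+        //   var cell = keras.layers.LSTMCell(16);
+        //   var layer = keras.layers.RNN(cell, return_state: true);
+        //   var result = layer.Apply(tf.ones((8, 10, 32)));  // last output followed by final h and c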
+
         /// <summary>
         /// Long Short-Term Memory layer - Hochreiter 1997.
         /// </summary>
@@ -769,7 +869,8 @@ public ILayer LSTM(int units,
                     GoBackwards = go_backwards,
                     Stateful = stateful,
                     TimeMajor = time_major,
-                    Unroll = unroll
+                    Unroll = unroll, 
+                    UnitForgetBias = unit_forget_bias
                 });
 
         /// <summary>
diff --git a/src/TensorFlowNET.Keras/Layers/Merging/Merge.cs b/src/TensorFlowNET.Keras/Layers/Merging/Merge.cs
index 7df654eeb..bcbb20d88 100644
--- a/src/TensorFlowNET.Keras/Layers/Merging/Merge.cs
+++ b/src/TensorFlowNET.Keras/Layers/Merging/Merge.cs
@@ -5,6 +5,7 @@
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -21,7 +22,7 @@ public override void build(KerasShapesWrapper input_shape)
             _buildInputShape = input_shape;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             return _merge_function(inputs);
         }
diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs
index d02d2509c..655581576 100644
--- a/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs
+++ b/src/TensorFlowNET.Keras/Layers/Normalization/BatchNormalization.cs
@@ -17,6 +17,7 @@ limitations under the License.
 using System;
 using System.Collections.Generic;
 using System.Linq;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
@@ -146,7 +147,7 @@ bool _support_zero_size_input()
             return false;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor outputs = null;
             var training_tensor = training == null
diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs
index e90c04029..1898f24c8 100644
--- a/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs
+++ b/src/TensorFlowNET.Keras/Layers/Normalization/LayerNormalization.cs
@@ -17,6 +17,7 @@ limitations under the License.
 using System;
 using System.Collections.Generic;
 using System.Linq;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
@@ -101,7 +102,7 @@ public override Shape ComputeOutputShape(Shape input_shape)
             return input_shape;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor outputs = null;
             var inputs_dtype = inputs.dtype.as_base_dtype();
diff --git a/src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs b/src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs
index a65154bf4..987b56bc4 100644
--- a/src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs
+++ b/src/TensorFlowNET.Keras/Layers/Normalization/Normalization.cs
@@ -14,6 +14,7 @@ You may obtain a copy of the License at
    limitations under the License.
 ******************************************************************************/
 
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Saving;
 
@@ -157,7 +158,7 @@ public override void adapt(Tensor data, int? batch_size = null, int? steps = nul
             base.adapt(data, batch_size: batch_size, steps: steps);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (_args.Invert)
             {
diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs
index d62fb63a4..ffaabec97 100644
--- a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling1D.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using System.Text;
 using Tensorflow.Keras.ArgsDefinition;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -12,7 +13,7 @@ public GlobalAveragePooling1D(Pooling1DArgs args)
         {
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (data_format == "channels_last")
                 return math_ops.reduce_mean(inputs, 1, false);
diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs
index 000e4b8b9..e06665173 100644
--- a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalAveragePooling2D.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using System.Text;
 using Tensorflow.Keras.ArgsDefinition;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -12,7 +13,7 @@ public GlobalAveragePooling2D(Pooling2DArgs args)
         {
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (data_format == "channels_last")
                 return math_ops.reduce_mean(inputs, (1, 2), false);
diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs
index 2de4671ca..15695e8a7 100644
--- a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling1D.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using System.Text;
 using Tensorflow.Keras.ArgsDefinition;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -12,7 +13,7 @@ public GlobalMaxPooling1D(Pooling1DArgs args)
         {
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (data_format == "channels_last")
                 return math_ops.reduce_max(inputs, 1, false);
diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs
index b7e2c9452..76db858da 100644
--- a/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Pooling/GlobalMaxPooling2D.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using System.Text;
 using Tensorflow.Keras.ArgsDefinition;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -12,7 +13,7 @@ public GlobalMaxPooling2D(Pooling2DArgs args)
         {
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (data_format == "channels_last")
                 return math_ops.reduce_max(inputs, (1, 2), false);
diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs
index a2f4c51b6..81a340199 100644
--- a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling1D.cs
@@ -18,6 +18,7 @@ limitations under the License.
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Utils;
+using Tensorflow.Common.Types;
 using static Tensorflow.Binding;
 
 namespace Tensorflow.Keras.Layers
@@ -36,7 +37,7 @@ public Pooling1D(Pooling1DArgs args)
             input_spec = new InputSpec(ndim: 3);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             int pad_axis = args.DataFormat == "channels_first" ? 2 : 3;
             inputs = tf.expand_dims(inputs, pad_axis);
diff --git a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs
index 270322559..f83f1e152 100644
--- a/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Pooling/Pooling2D.cs
@@ -17,6 +17,7 @@ limitations under the License.
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Utils;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -36,7 +37,7 @@ public Pooling2D(Pooling2DArgs args)
             input_spec = new InputSpec(ndim: 4);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             int[] pool_shape;
             int[] strides;
diff --git a/src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs b/src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs
index 5620a916c..20d2a53d5 100644
--- a/src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs
+++ b/src/TensorFlowNET.Keras/Layers/Preprocessing/CategoryEncoding.cs
@@ -1,6 +1,6 @@
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
-
+using Tensorflow.Common.Types;
 namespace Tensorflow.Keras.Layers
 {
     /// <summary>
@@ -15,7 +15,7 @@ public CategoryEncoding(CategoryEncodingArgs args) : base(args)
             this.args = args;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             var depth = args.NumTokens;
             var max_value = tf.reduce_max(inputs);
diff --git a/src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs b/src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs
index 5fc581af9..7fa367eea 100644
--- a/src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs
+++ b/src/TensorFlowNET.Keras/Layers/Preprocessing/Rescaling.cs
@@ -1,5 +1,6 @@
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -17,7 +18,7 @@ public Rescaling(RescalingArgs args) : base(args)
             this.args = args;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             scale = constant_op.constant(args.Scale, args.DType);
             offset = constant_op.constant(args.Offset, args.DType);
diff --git a/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs b/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs
index 603e2b071..081966ad4 100644
--- a/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs
+++ b/src/TensorFlowNET.Keras/Layers/Preprocessing/Resizing.cs
@@ -4,6 +4,7 @@
 using System.Text;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -19,7 +20,7 @@ public Resizing(ResizingArgs args) : base(args)
             this.args = args;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             return image_ops_impl.resize_images_v2(inputs, new[] { args.Height, args.Width }, method: args.Interpolation);
         }
diff --git a/src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs b/src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs
index aa3a92a49..ada1851ce 100644
--- a/src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs
+++ b/src/TensorFlowNET.Keras/Layers/Regularization/Dropout.cs
@@ -1,4 +1,5 @@
-using Tensorflow.Keras.ArgsDefinition;
+using Tensorflow.Common.Types;
+using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Utils;
 using static Tensorflow.Binding;
@@ -15,7 +16,7 @@ public Dropout(DropoutArgs args)
             this.args = args;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (training == null)
                 training = false;
diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs
index 9ead15cb5..312854388 100644
--- a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping1D.cs
@@ -1,6 +1,7 @@
 using Tensorflow.Keras.ArgsDefinition.Reshaping;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers.Reshaping
 {
@@ -27,7 +29,7 @@ public override void build(KerasShapesWrapper input_shape)
             _buildInputShape = input_shape;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor output = inputs;
             if (output.rank != 3)
diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs
index 087d59a14..4a5c6eabc 100644
--- a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping2D.cs
@@ -1,6 +1,7 @@
 using Tensorflow.Keras.ArgsDefinition.Reshaping;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers.Reshaping
 {
@@ -21,7 +22,7 @@ public override void build(KerasShapesWrapper input_shape)
             built = true;
             _buildInputShape = input_shape;
         }
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor output = inputs;
             if (output.rank != 4)
diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs
index 04a1af600..83f86c6fc 100644
--- a/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Cropping3D.cs
@@ -1,6 +1,7 @@
 using Tensorflow.Keras.ArgsDefinition.Reshaping;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers.Reshaping
 {
@@ -21,7 +22,7 @@ public override void build(KerasShapesWrapper input_shape)
             _buildInputShape = input_shape;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor output = inputs;
             if (output.rank != 5)
diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs
index 539b5f624..a6192849d 100644
--- a/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs
+++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Flatten.cs
@@ -1,5 +1,6 @@
 using System;
 using System.Linq;
+using Tensorflow.Common.Types;
 using Tensorflow.Framework;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
@@ -23,7 +24,7 @@ public Flatten(FlattenArgs args)
             _channels_first = args.DataFormat == "channels_first";
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (_channels_first)
             {
diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs
index e391775c8..7fdb816bf 100644
--- a/src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs
+++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Permute.cs
@@ -6,6 +6,7 @@
 using static Tensorflow.Binding;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers {
     public class Permute : Layer
@@ -28,7 +29,7 @@ public override void build(KerasShapesWrapper input_shape)
             built = true;
             _buildInputShape = input_shape;
         }
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             Tensor outputs = inputs;
             return tf.transpose(outputs, new Axis(permute));
diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs
index 92a772f34..4b3d30e29 100644
--- a/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs
+++ b/src/TensorFlowNET.Keras/Layers/Reshaping/Reshape.cs
@@ -4,6 +4,7 @@
 using System.Collections.Generic;
 using System;
 using System.Linq;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -19,7 +20,7 @@ public Reshape(ReshapeArgs args)
             this.args = args;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             var shapes = new List<Tensor>();
             shapes.Add(array_ops.shape(inputs)[0]);
diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs
index 8314151f6..223f33d4f 100644
--- a/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Reshaping/UpSampling2D.cs
@@ -6,6 +6,7 @@
 using Tensorflow.Keras.Utils;
 using static Tensorflow.Binding;
 using static Tensorflow.KerasApi;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -24,7 +25,7 @@ public UpSampling2D(UpSampling2DArgs args) : base(args)
             inputSpec = new InputSpec(ndim: 4);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             return keras.backend.resize_images(inputs, 
                 size[0], size[1], 
diff --git a/src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs b/src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs
index 7c87100a2..3b37dac46 100644
--- a/src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs
+++ b/src/TensorFlowNET.Keras/Layers/Reshaping/ZeroPadding2D.cs
@@ -2,6 +2,7 @@
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Utils;
+using Tensorflow.Common.Types;
 using static Tensorflow.KerasApi;
 
 namespace Tensorflow.Keras.Layers
@@ -26,7 +27,7 @@ public ZeroPadding2D(ZeroPadding2DArgs args, string data_format = null)
             this.input_spec = new InputSpec(ndim: 4);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             return keras.backend.spatial_2d_padding(inputs,
                 padding: padding,
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs
new file mode 100644
index 000000000..75feb8ea2
--- /dev/null
+++ b/src/TensorFlowNET.Keras/Layers/Rnn/DropoutRNNCellMixin.cs
@@ -0,0 +1,109 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Common.Types;
+using Tensorflow.Keras.ArgsDefinition;
+using Tensorflow.Keras.Engine;
+using Tensorflow.Keras.Utils;
+
+namespace Tensorflow.Keras.Layers.Rnn
+{
+    public abstract class DropoutRNNCellMixin: Layer, IRnnCell
+    {
+        public float dropout;
+        public float recurrent_dropout;
+        // TODO(Rinne): deal with cache.
+        public DropoutRNNCellMixin(LayerArgs args): base(args)
+        {
+
+        }
+
+        public abstract INestStructure<long> StateSize { get; }
+        public abstract INestStructure<long> OutputSize { get; }
+        public abstract bool SupportOptionalArgs { get; }
+        public virtual Tensors GetInitialState(Tensors inputs, Tensor batch_size, TF_DataType dtype)
+        {
+            return RnnUtils.generate_zero_filled_state_for_cell(this, inputs, batch_size, dtype);
+        }
+
+        protected void _create_non_trackable_mask_cache()
+        {
+            
+        }
+
+        public void reset_dropout_mask()
+        {
+
+        }
+
+        public void reset_recurrent_dropout_mask()
+        {
+
+        }
+
+        public Tensors? get_dropout_mask_for_cell(Tensors input, bool training, int count = 1)
+        {
+            if (dropout == 0f)
+                return null;
+            return _generate_dropout_mask(
+                tf.ones_like(input),
+                dropout,
+                training,
+                count);
+        }
+
+        // Get the recurrent dropout mask for RNN cell.
+        public Tensors? get_recurrent_dropout_mask_for_cell(Tensors input, bool training, int count = 1)
+        {
+            if (recurrent_dropout == 0f)
+                return null;
+            return _generate_dropout_mask(
+                tf.ones_like(input),
+                recurrent_dropout,
+                training,
+                count);
+        }
+
+        public Tensors _create_dropout_mask(Tensors input, bool training, int count = 1)
+        {
+            return _generate_dropout_mask(
+                tf.ones_like(input),
+                dropout,
+                training,
+                count);
+        }
+
+        public Tensors _create_recurrent_dropout_mask(Tensors input, bool training, int count = 1)
+        {
+            return _generate_dropout_mask(
+                tf.ones_like(input),
+                recurrent_dropout,
+                training,
+                count);
+        }
+
+        public Tensors _generate_dropout_mask(Tensor ones, float rate, bool training, int count = 1)
+        {
+            Tensors dropped_inputs()
+            {
+                DropoutArgs args = new DropoutArgs();
+                args.Rate = rate;
+                var DropoutLayer = new Dropout(args);
+                var mask = DropoutLayer.Apply(ones, training: training);
+                return mask;
+            }
+
+            if (count > 1)
+            {
+                Tensors results = new Tensors();
+                for (int i = 0; i < count; i++)
+                {
+                    results.Add(dropped_inputs());
+                }
+                return results;
+            }
+
+            return dropped_inputs();
+        }
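+
+        // Usage sketch (an assumption mirroring how concrete cells such as LSTMCell consume these
+        // helpers inside Call): request one mask per gate and multiply it into the matching input.
+        //   var dp_mask = get_dropout_mask_for_cell(inputs, training: true, count: 4);
+        //   if (dp_mask != null) inputs = inputs * dp_mask[0];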
+    }
+}
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs
index 59555e62b..025465fd6 100644
--- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs
+++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTM.cs
@@ -1,6 +1,8 @@
 using System.Linq;
 using Tensorflow.Keras.ArgsDefinition.Rnn;
 using Tensorflow.Keras.Engine;
+using Tensorflow.Common.Types;
+using Tensorflow.Common.Extensions;
 
 namespace Tensorflow.Keras.Layers.Rnn
 {
@@ -13,22 +15,105 @@ namespace Tensorflow.Keras.Layers.Rnn
     public class LSTM : RNN
     {
         LSTMArgs args;
-        InputSpec[] state_spec;
-
-        int units => args.Units;
+        InputSpec[] _state_spec;
+        InputSpec _input_spec;
+        bool _could_use_gpu_kernel;
 
         public LSTM(LSTMArgs args) :
-            base(args)
+            base(CreateCell(args), args)
         {
             this.args = args;
-            state_spec = new[] { units, units }
-                .Select(dim => new InputSpec(shape: (-1, dim)))
-                .ToArray();
+            _input_spec = new InputSpec(ndim: 3);
+            _state_spec = new[] { args.Units, args.Units }.Select(dim => new InputSpec(shape: (-1, dim))).ToArray();
+            _could_use_gpu_kernel = args.Activation == keras.activations.Tanh
+                && args.RecurrentActivation == keras.activations.Sigmoid
+                && args.RecurrentDropout == 0 && !args.Unroll && args.UseBias
+                && ops.executing_eagerly_outside_functions();
+        }
+
+        private static IRnnCell CreateCell(LSTMArgs lstmArgs)
+        {
+            return new LSTMCell(new LSTMCellArgs()
+            {
+                Units = lstmArgs.Units,
+                Activation = lstmArgs.Activation,
+                RecurrentActivation = lstmArgs.RecurrentActivation,
+                UseBias = lstmArgs.UseBias,
+                KernelInitializer = lstmArgs.KernelInitializer,
+                RecurrentInitializer = lstmArgs.RecurrentInitializer,
+                UnitForgetBias = lstmArgs.UnitForgetBias,
+                BiasInitializer = lstmArgs.BiasInitializer,
+                // TODO(Rinne): kernel_regularizer
+                // TODO(Rinne): recurrent_regularizer
+                // TODO(Rinne): bias_regularizer
+                // TODO(Rinne): kernel_constraint
+                // TODO(Rinne): recurrent_constraint
+                // TODO(Rinne): bias_constraint
+                Dropout = lstmArgs.Dropout,
+                RecurrentDropout = lstmArgs.RecurrentDropout,
+                Implementation = lstmArgs.Implementation,
+                DType = lstmArgs.DType,
+                Trainable = lstmArgs.Trainable
+            });
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
-            return base.Call(inputs, state: state, training: training);
+            // Ragged inputs are not handled here yet.
+
+            (inputs, initial_state, _) = _process_inputs(inputs, initial_state, null);
+
+            Tensor mask = null;
+            if(optional_args is RnnOptionalArgs rnnArgs)
+            {
+                mask = rnnArgs.Mask;
+            }
+
+            var single_input = inputs.Single;
+            var input_shape = single_input.shape;
+            var timesteps = args.TimeMajor ? input_shape[0] : input_shape[1];
+
+            _maybe_reset_cell_dropout_mask(Cell);
+
+            Func<Tensors, Tensors, (Tensors, Tensors)> step = (inputs, states) =>
+            {
+                var res = Cell.Apply(inputs, states, training is null ? true : training.Value);
+                var (output, state) = res;
+                return (output, state);
+            };
+
+            var (last_output, outputs, states) = keras.backend.rnn(
+                step,
+                inputs,
+                initial_state,
+                constants: null,
+                go_backwards: args.GoBackwards,
+                mask: mask,
+                unroll: args.Unroll,
+                input_length: ops.convert_to_tensor(timesteps),
+                time_major: args.TimeMajor,
+                zero_output_for_mask: args.ZeroOutputForMask,
+                return_all_outputs: args.ReturnSequences
+            );
+
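+            // Shape note: with ReturnSequences the stacked `outputs` has shape
+            // (batch, timesteps, units), or (timesteps, batch, units) when TimeMajor;
+            // otherwise only the last step (batch, units) is kept. ReturnState additionally
+            // appends the final hidden state h and carry state c.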
+            Tensor output;
+            if (args.ReturnSequences)
+            {
+                output = keras.backend.maybe_convert_to_ragged(false, outputs, (int)timesteps, args.GoBackwards);
+            }
+            else
+            {
+                output = last_output;
+            }
+
+            if (args.ReturnState)
+            {
+                return new Tensor[] { output }.Concat(states).ToArray().ToTensors();
+            }
+            else
+            {
+                return output;
+            }
         }
     }
 }
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs
index a622c91a9..284a2b778 100644
--- a/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs
+++ b/src/TensorFlowNET.Keras/Layers/Rnn/LSTMCell.cs
@@ -1,16 +1,233 @@
-using Tensorflow.Keras.ArgsDefinition.Rnn;
+using Newtonsoft.Json;
+using Serilog.Core;
+using System.Diagnostics;
+using Tensorflow.Common.Extensions;
+using Tensorflow.Common.Types;
+using Tensorflow.Keras.ArgsDefinition.Rnn;
 using Tensorflow.Keras.Engine;
+using Tensorflow.Keras.Saving;
+using Tensorflow.Keras.Utils;
 
 namespace Tensorflow.Keras.Layers.Rnn
 {
-    public class LSTMCell : Layer
+    /// <summary>
+    /// Cell class for the LSTM layer.
+    /// See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
+    /// for details about the usage of RNN API.
+    /// This class processes one step within the whole time sequence input, whereas
+    /// `tf.keras.layer.LSTM` processes the whole sequence.
+    /// </summary>
+    public class LSTMCell : DropoutRNNCellMixin
     {
-        LSTMCellArgs args;
+        LSTMCellArgs _args;
+        IVariableV1 _kernel;
+        IVariableV1 _recurrent_kernel;
+        IInitializer _bias_initializer;
+        IVariableV1 _bias;
+        INestStructure<long> _state_size;
+        INestStructure<long> _output_size;
+        public override INestStructure<long> StateSize => _state_size;
 
+        public override INestStructure<long> OutputSize => _output_size;
+
+        public override bool SupportOptionalArgs => false;
         public LSTMCell(LSTMCellArgs args)
             : base(args)
         {
-            this.args = args;
+            _args = args;
+            if (args.Units <= 0)
+            {
+                throw new ValueError(
+                            $"units must be a positive integer, got {args.Units}");
+            }
+            _args.Dropout = Math.Min(1f, Math.Max(0f, this._args.Dropout));
+            _args.RecurrentDropout = Math.Min(1f, Math.Max(0f, this._args.RecurrentDropout));
+            if (_args.RecurrentDropout != 0f && _args.Implementation != 1)
+            {
+                Debug.WriteLine("RNN `implementation=2` is not supported when `recurrent_dropout` is set." +
+                    "Using `implementation=1`.");
+                _args.Implementation = 1;
+            }
+
+            _state_size = new NestList<long>(_args.Units, _args.Units);
+            _output_size = new NestNode<long>(_args.Units);
+        }
+
+        public override void build(KerasShapesWrapper input_shape)
+        {
+            base.build(input_shape);
+            var single_shape = input_shape.ToSingleShape();
+            var input_dim = single_shape[-1];
+            _kernel = add_weight("kernel", (input_dim, _args.Units * 4),
+                initializer: _args.KernelInitializer
+            );
+
+            _recurrent_kernel = add_weight("recurrent_kernel", (_args.Units, _args.Units * 4),
+                initializer: _args.RecurrentInitializer
+            );
+
+            if (_args.UseBias)
+            {
+                if (_args.UnitForgetBias)
+                {
+                    Tensor bias_initializer()
+                    {
+                        return keras.backend.concatenate(
+                            new Tensors(
+                            _args.BiasInitializer.Apply(new InitializerArgs(shape: (_args.Units))),
+                            tf.ones_initializer.Apply(new InitializerArgs(shape: (_args.Units))),
+                            _args.BiasInitializer.Apply(new InitializerArgs(shape: (_args.Units * 2)))), axis: 0);
+                    }
+                }
+                else
+                {
+                    _bias_initializer = _args.BiasInitializer;
+                }
+                _bias = add_weight("bias", (_args.Units * 4),
+                    initializer: _bias_initializer
+                    );
+            }
+            built = true;
+        }
+        protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? optional_args = null)
+        {
+            var h_tm1 = states[0]; // previous memory state
+            var c_tm1 = states[1]; // previous carry state
+
+            var dp_mask = get_dropout_mask_for_cell(inputs, training.Value, count: 4);
+            var rec_dp_mask = get_recurrent_dropout_mask_for_cell(
+                               h_tm1, training.Value, count: 4);
+
+            Tensor c;
+            Tensor o;
+            if (_args.Implementation == 1)
+            {
+                Tensor inputs_i;
+                Tensor inputs_f;
+                Tensor inputs_c;
+                Tensor inputs_o;
+                if (0f < _args.Dropout && _args.Dropout < 1f)
+                {
+                    inputs_i = inputs * dp_mask[0];
+                    inputs_f = inputs * dp_mask[1];
+                    inputs_c = inputs * dp_mask[2];
+                    inputs_o = inputs * dp_mask[3];
+                }
+                else
+                {
+                    inputs_i = inputs;
+                    inputs_f = inputs;
+                    inputs_c = inputs;
+                    inputs_o = inputs;
+                }
+                var k = tf.split(_kernel.AsTensor(), num_split: 4, axis: 1);
+                Tensor k_i = k[0], k_f = k[1], k_c = k[2], k_o = k[3];
+                var x_i = math_ops.matmul(inputs_i, k_i);
+                var x_f = math_ops.matmul(inputs_f, k_f);
+                var x_c = math_ops.matmul(inputs_c, k_c);
+                var x_o = math_ops.matmul(inputs_o, k_o);
+                if (_args.UseBias)
+                {
+                    var b = tf.split(_bias.AsTensor(), num_split: 4, axis: 0);
+                    Tensor b_i = b[0], b_f = b[1], b_c = b[2], b_o = b[3];
+                    x_i = gen_nn_ops.bias_add(x_i, b_i);
+                    x_f = gen_nn_ops.bias_add(x_f, b_f);
+                    x_c = gen_nn_ops.bias_add(x_c, b_c);
+                    x_o = gen_nn_ops.bias_add(x_o, b_o);
+                }
+
+                Tensor h_tm1_i;
+                Tensor h_tm1_f;
+                Tensor h_tm1_c;
+                Tensor h_tm1_o;
+                if (0f < _args.RecurrentDropout && _args.RecurrentDropout < 1f)
+                {
+                    h_tm1_i = h_tm1 * rec_dp_mask[0];
+                    h_tm1_f = h_tm1 * rec_dp_mask[1];
+                    h_tm1_c = h_tm1 * rec_dp_mask[2];
+                    h_tm1_o = h_tm1 * rec_dp_mask[3];
+                }
+                else
+                {
+                    h_tm1_i = h_tm1;
+                    h_tm1_f = h_tm1;
+                    h_tm1_c = h_tm1;
+                    h_tm1_o = h_tm1;
+                }
+                var x = new Tensor[] { x_i, x_f, x_c, x_o };
+                var h_tm1_array = new Tensor[] { h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o };
+                (c, o) = _compute_carry_and_output(x, h_tm1_array, c_tm1);
+            }
+            else
+            {
+                if (0f < _args.Dropout && _args.Dropout < 1f)
+                    inputs = inputs * dp_mask[0];
+                var z = math_ops.matmul(inputs, _kernel.AsTensor());
+                z += math_ops.matmul(h_tm1, _recurrent_kernel.AsTensor());
+                if (_args.UseBias)
+                {
+                    z = tf.nn.bias_add(z, _bias);
+                }
+                var z_array = tf.split(z, num_split: 4, axis: 1);
+                (c, o) = _compute_carry_and_output_fused(z_array, c_tm1);
+            }
+            var h = o * _args.Activation.Apply(c);
+            // The Tensors constructor packs every element after the first one into a single array,
+            // so wrap the result in a Nest to preserve the (output, [h, c]) structure.
+            return new Nest<Tensor>(new INestStructure<Tensor>[] { new NestNode<Tensor>(h), new NestList<Tensor>(h, c) }).ToTensors();
+        }
+
+        /// <summary>
+        /// Computes carry and output using split kernels.
+        /// </summary>
+        /// <param name="x"></param>
+        /// <param name="h_tm1"></param>
+        /// <param name="c_tm1"></param>
+        /// <returns></returns>
+        /// <exception cref="NotImplementedException"></exception>
+        public Tensors _compute_carry_and_output(Tensor[] x, Tensor[] h_tm1, Tensor c_tm1) 
+        {
+            Tensor x_i = x[0], x_f = x[1], x_c = x[2], x_o = x[3];
+            Tensor h_tm1_i = h_tm1[0], h_tm1_f = h_tm1[1], h_tm1_c = h_tm1[2], 
+                h_tm1_o = h_tm1[3];
+
+            var _recurrent_kernel_tensor = _recurrent_kernel.AsTensor();
+            int startIndex = (int)_recurrent_kernel_tensor.shape[0];
+            var _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor, 
+                new[] { 0, 0 }, new[] { startIndex, _args.Units });
+            var i = _args.RecurrentActivation.Apply(
+                    x_i + math_ops.matmul(h_tm1_i, _recurrent_kernel_slice));
+            _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor,
+                new[] { 0, _args.Units }, new[] { startIndex, _args.Units});
+            var f = _args.RecurrentActivation.Apply(
+                    x_f + math_ops.matmul(h_tm1_f, _recurrent_kernel_slice));
+            _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor,
+                new[] { 0, _args.Units * 2 }, new[] { startIndex, _args.Units });
+            var c = f * c_tm1 + i * _args.Activation.Apply(
+                    x_c + math_ops.matmul(h_tm1_c, _recurrent_kernel_slice));
+            _recurrent_kernel_slice = tf.slice(_recurrent_kernel_tensor,
+                new[] { 0, _args.Units * 3 }, new[] { startIndex, _args.Units });
+            var o = _args.RecurrentActivation.Apply(
+                x_o + math_ops.matmul(h_tm1_o, _recurrent_kernel_slice));
+
+            return new Tensors(c, o);
+        }
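+
+        // The split-kernel path above follows the standard LSTM gate equations, with sigma the
+        // recurrent activation and act the cell activation:
+        //   i = sigma(x_i + h_i * W_ri)          f = sigma(x_f + h_f * W_rf)
+        //   c = f * c_tm1 + i * act(x_c + h_c * W_rc)
+        //   o = sigma(x_o + h_o * W_ro)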
+
+        /// <summary>
+        /// Computes carry and output using fused kernels.
+        /// </summary>
+        /// <param name="z"></param>
+        /// <param name="c_tm1"></param>
+        /// <returns></returns>
+        public Tensors _compute_carry_and_output_fused(Tensor[] z, Tensor c_tm1)
+        {
+            Tensor z0 = z[0], z1 = z[1], z2 = z[2], z3 = z[3];
+            var i = _args.RecurrentActivation.Apply(z0);
+            var f = _args.RecurrentActivation.Apply(z1);
+            var c = f * c_tm1 + i * _args.Activation.Apply(z2);
+            var o = _args.RecurrentActivation.Apply(z3);
+            return new Tensors(c, o);
         }
     }
+
+    
 }
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs
index 310e80574..f86de8a85 100644
--- a/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs
+++ b/src/TensorFlowNET.Keras/Layers/Rnn/RNN.cs
@@ -1,136 +1,580 @@
-using System;
+using OneOf;
+using System;
 using System.Collections.Generic;
+using System.Reflection;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.ArgsDefinition.Rnn;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Util;
+using Tensorflow.Common.Extensions;
+using System.Linq.Expressions;
+using Tensorflow.Keras.Utils;
+using Tensorflow.Common.Types;
+using System.Runtime.CompilerServices;
 // from tensorflow.python.distribute import distribution_strategy_context as ds_context;
 
 namespace Tensorflow.Keras.Layers.Rnn
 {
-    public class RNN : Layer
+    /// <summary>
+    /// Base class for recurrent layers.
+    /// See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
+    /// for details about the usage of RNN API.
+    /// </summary>
+    public class RNN : RnnBase
     {
-        private RNNArgs args;
-        private object input_spec = null; // or NoneValue??
-        private object state_spec = null;
-        private object _states = null;
-        private object constants_spec = null;
-        private int _num_constants = 0;
-        protected IVariableV1 kernel;
-        protected IVariableV1 bias;
-        protected ILayer cell;
-        public RNN(RNNArgs args) : base(PreConstruct(args))
+        private RNNArgs _args;
+        private object _input_spec = null; // or NoneValue??
+        private object _state_spec = null;
+        private Tensors _states = null;
+        private object _constants_spec = null;
+        private int _num_constants;
+        protected IVariableV1 _kernel;
+        protected IVariableV1 _bias;
+        private IRnnCell _cell;
+        protected IRnnCell Cell
         {
-            this.args = args;
+            get
+            {
+                return _cell;
+            }
+            init
+            {
+                _cell = value;
+                _self_tracked_trackables.Add(_cell);
+            }
+        }
+
+        public RNN(IRnnCell cell, RNNArgs args) : base(PreConstruct(args))
+        {
+            _args = args;
+            SupportsMasking = true;
+
+            Cell = cell;
+
+            // get input_shape
+            _args = PreConstruct(args);
+
+            _num_constants = 0;
+        }
+
+        public RNN(IEnumerable<IRnnCell> cells, RNNArgs args) : base(PreConstruct(args))
+        {
+            _args = args;
             SupportsMasking = true;
 
-            // The input shape is unknown yet, it could have nested tensor inputs, and
-            // the input spec will be the list of specs for nested inputs, the structure
-            // of the input_spec will be the same as the input.
-
-            //if(stateful)
-            //{
-            //    if (ds_context.has_strategy()) // ds_context????
-            //    {
-            //        throw new Exception("RNNs with stateful=True not yet supported with tf.distribute.Strategy");
-            //    }
-            //}
+            Cell = new StackedRNNCells(cells, new StackedRNNCellsArgs());
+
+            // get input_shape
+            _args = PreConstruct(args);
+
+            _num_constants = 0;
+        }
+
+        // States is a tuple consisting of the cells' state sizes, e.g. (cell1.state_size, cell2.state_size, ...).
+        // Each state_size can be a single integer, a list/tuple of integers, a TensorShape, or a list/tuple of TensorShapes.
+        public Tensors States
+        {
+            get
+            {
+                if (_states == null)
+                {
+                    // CHECK(Rinne): check if this is correct.
+                    var nested = Cell.StateSize.MapStructure<Tensor?>(x => null);
+                    _states = nested.AsNest().ToTensors();
+                }
+                return _states;
+            }
+            set { _states = value; }
+        }
+
+        private INestStructure<Shape> compute_output_shape(Shape input_shape)
+        {
+            var batch = input_shape[0];
+            var time_step = input_shape[1];
+            if (_args.TimeMajor)
+            {
+                (batch, time_step) = (time_step, batch);
+            }
+
+            // state_size is either a single positive integer or an array of ints
+            var state_size = Cell.StateSize;
+            if(state_size?.TotalNestedCount == 1)
+            {
+                state_size = new NestList<long>(state_size.Flatten().First());
+            }
+
+            Func<long, Shape>  _get_output_shape = (flat_output_size) =>
+            {
+                var output_dim = new Shape(flat_output_size).as_int_list();
+                Shape output_shape;
+                if (_args.ReturnSequences)
+                {
+                    if (_args.TimeMajor)
+                    {
+                        output_shape = new Shape(new int[] { (int)time_step, (int)batch }.concat(output_dim));
+                    }
+                    else
+                    {
+                        output_shape = new Shape(new int[] { (int)batch, (int)time_step }.concat(output_dim));
+
+                    }
+                }
+                else
+                {
+                    output_shape = new Shape(new int[] { (int)batch }.concat(output_dim));
+                }
+                return output_shape;
+            };
+
+            Type type = Cell.GetType();
+            PropertyInfo output_size_info = type.GetProperty("output_size");
+            INestStructure<Shape> output_shape;
+            if (output_size_info != null)
+            {
+                output_shape = Nest.MapStructure(_get_output_shape, Cell.OutputSize);
+            }
+            else
+            {
+                output_shape = new NestNode<Shape>(_get_output_shape(state_size.Flatten().First()));
+            }
+
+            if (_args.ReturnState)
+            {
+                Func<long, Shape> _get_state_shape = (flat_state) =>
+                {
+                    var state_shape = new int[] { (int)batch }.concat(new Shape(flat_state).as_int_list());
+                    return new Shape(state_shape);
+                };
+
+
+                var state_shape = Nest.MapStructure(_get_state_shape, state_size);
+
+                return new Nest<Shape>(new[] { output_shape, state_shape } );
+            }
+            else
+            {
+                return output_shape;
+            }
+
+        }
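+
+        // Worked example (numbers are illustrative assumptions): for a cell with OutputSize = 8
+        // and StateSize = (8, 8), inputs of shape (batch: 32, timesteps: 10, features: 4),
+        // ReturnSequences = true and ReturnState = true produce the nested shapes
+        // ((32, 10, 8), (32, 8), (32, 8)).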
+
+        private Tensors compute_mask(Tensors inputs, Tensors mask)
+        {
+            // Time step masks must be the same for each input.
+            // This is because the mask for an RNN is of size [batch, time_steps, 1],
+            // and specifies which time steps should be skipped, and a time step
+            // must be skipped for all inputs.
+
+            mask = nest.flatten(mask)[0];
+            var output_mask = _args.ReturnSequences ? mask : null;
+            if (_args.ReturnState)
+            {
+                var state_mask = new List<Tensor>();
+                for (int i = 0; i < len(States); i++)
+                {
+                    state_mask.Add(null);
+                }
+                return new List<Tensor> { output_mask }.concat(state_mask);
+            }
+            else
+            {
+                return output_mask;
+            }
         }
 
         public override void build(KerasShapesWrapper input_shape)
         {
-            if (!cell.Built)
+            input_shape = new KerasShapesWrapper(input_shape.Shapes[0]);
+
+            InputSpec get_input_spec(Shape shape)
+            {
+                var input_spec_shape = shape.as_int_list();
+
+                var (batch_index, time_step_index) = _args.TimeMajor ? (1, 0) : (0, 1);
+                if (!_args.Stateful)
+                {
+                    input_spec_shape[batch_index] = -1;
+                }
+                input_spec_shape[time_step_index] = -1;
+                return new InputSpec(shape: input_spec_shape);
+            }
+
+            Shape get_step_input_shape(Shape shape)
+            {
+
+                // return shape[1:] if self.time_major else (shape[0],) + shape[2:]
+                if (_args.TimeMajor)
+                {
+                    return shape.as_int_list().ToList().GetRange(1, shape.Length - 1).ToArray();
+                }
+                else
+                {
+                    return new int[] { shape.as_int_list()[0] }.concat(shape.as_int_list().ToList().GetRange(2, shape.Length - 2).ToArray());
+                }
+
+
+            }
+
+            object get_state_spec(Shape shape)
+            {
+                var state_spec_shape = shape.as_int_list();
+                // append batch dim
+                state_spec_shape = new int[] { -1 }.concat(state_spec_shape);
+                return new InputSpec(shape: state_spec_shape);
+            }
+
+            // Check whether the input shape contains any nested shapes. It could be
+            // (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from
+            // numpy inputs.
+
+
+            if (Cell is Layer layer && !layer.Built)
+            {
+                layer.build(input_shape);
+                layer.Built = true;
+            }
+
+            this.built = true;
+        }
+
+        /// <summary>
+        /// 
+        /// </summary>
+        /// <param name="inputs"></param>
+        /// <param name="mask">Binary tensor of shape [batch_size, timesteps] indicating whether a given timestep should be masked</param>
+        /// <param name="training"></param>
+        /// <param name="initial_state">List of initial state tensors to be passed to the first call of the cell</param>
+        /// <param name="constants">List of constant tensors to be passed to the cell at each timestep</param>
+        /// <returns></returns>
+        /// <exception cref="ValueError"></exception>
+        /// <exception cref="NotImplementedException"></exception>
+        protected override Tensors Call(Tensors inputs, Tensors initial_state = null, bool? training = null, IOptionalArgs? optional_args = null)
+        {
+            RnnOptionalArgs? rnn_optional_args = optional_args as RnnOptionalArgs;
+            if(optional_args is not null && rnn_optional_args is null)
+            {
+                throw new ArgumentException("The optional args shhould be of type `RnnOptionalArgs`");
+            }
+            Tensors? constants = rnn_optional_args?.Constants;
+            Tensors? mask = rnn_optional_args?.Mask;
+            //var (inputs_padded, row_length) = BackendImpl.convert_inputs_if_ragged(inputs);
+            // Ragged tensors are not supported for now.
+            int row_length = 0; // TODO(Rinne): support this param.
+            bool is_ragged_input = false;
+            _validate_args_if_ragged(is_ragged_input, mask);
+
+            (inputs, initial_state, constants) = _process_inputs(inputs, initial_state, constants);
+
+            _maybe_reset_cell_dropout_mask(Cell);
+            if (Cell is StackedRNNCells)
+            {
+                var stack_cell = Cell as StackedRNNCells;
+                foreach (IRnnCell cell in stack_cell.Cells)
+                {
+                    _maybe_reset_cell_dropout_mask(cell);
+                }
+            }
+
+            if (mask != null)
+            {
+                // Time step masks must be the same for each input.
+                mask = mask.Flatten().First();
+            }
+
+            Shape input_shape;
+            if (!inputs.IsNested())
+            {
+                // In the case of nested input, use the first element for shape check
+                // input_shape = nest.flatten(inputs)[0].shape;
+                // TODO(Wanglongzhi2001)
+                input_shape = inputs.Flatten().First().shape;
+            }
+            else
+            {
+                input_shape = inputs.shape;
+            }
+
+            var timesteps = _args.TimeMajor ? input_shape[0] : input_shape[1];
+
+            if (_args.Unroll && timesteps == null)
+            {
+                throw new ValueError(
+                "Cannot unroll an RNN if the " +
+                "time dimension is undefined. \n" +
+                "- If using a Sequential model, " +
+                "specify the time dimension by passing " +
+                "an `input_shape` or `batch_input_shape` " +
+                "argument to your first layer. If your " +
+                "first layer is an Embedding, you can " +
+                "also use the `input_length` argument.\n" +
+                "- If using the functional API, specify " +
+                "the time dimension by passing a `shape` " +
+                "or `batch_shape` argument to your Input layer."
+                );
+            }
+
+            // cell_call_fn = (self.cell.__call__ if callable(self.cell) else self.cell.call)
+            Func<Tensors, Tensors, (Tensors, Tensors)> step;
+            bool is_tf_rnn_cell = false;
+            if (constants is not null)
+            {
+                if (!Cell.SupportOptionalArgs)
+                {
+                    throw new ValueError(
+                          $"RNN cell {Cell} does not support constants." +
+                          $"Received: constants={constants}");
+                }
+
+                step = (inputs, states) =>
+                {
+                    constants = new Tensors(states.TakeLast(_num_constants).ToArray());
+                    states = new Tensors(states.SkipLast(_num_constants).ToArray());
+                    states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states[0]) : states;
+                    var (output, new_states) = Cell.Apply(inputs, states, optional_args: new RnnOptionalArgs() { Constants = constants });
+                    return (output, new_states);
+                };
+            }
+            else
+            {
+                step = (inputs, states) =>
+                {
+                    states = len(states) == 1 && is_tf_rnn_cell ? new Tensors(states.First()) : states;
+                    var (output, new_states) = Cell.Apply(inputs, states);
+                    return (output, new_states);
+                };
+            }
+           
+            var (last_output, outputs, states) = keras.backend.rnn(
+                step,
+                inputs,
+                initial_state,
+                constants: constants,
+                go_backwards: _args.GoBackwards,
+                mask: mask,
+                unroll: _args.Unroll,
+                input_length: row_length != null ? new Tensor(row_length) : new Tensor(timesteps),
+                time_major: _args.TimeMajor,
+                zero_output_for_mask: _args.ZeroOutputForMask,
+                return_all_outputs: _args.ReturnSequences);
+
+            if (_args.Stateful)
+            {
+                throw new NotImplementedException("The `stateful` argument hasn't been implemented yet.");
+            }
+
+            Tensors output = new Tensors();
+            if (_args.ReturnSequences)
+            {
+                // TODO(Rinne): add go_backwards parameter and revise the `row_length` param
+                output = keras.backend.maybe_convert_to_ragged(is_ragged_input, outputs, row_length, false);
+            }
+            else
+            {
+                output = last_output;
+            }
+
+            if (_args.ReturnState)
+            {
+                foreach (var state in states)
+                {
+                    output.Add(state);
+                }
+                return output;
+            }
+            else
+            {
+                //var tapeSet = tf.GetTapeSet();
+                //foreach(var tape in tapeSet)
+                //{
+                //    tape.Watch(output);
+                //}
+                return output;
+            }
+        }
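+
+        // Minimal usage sketch of this layer (illustrative only; it mirrors the
+        // RNNForSimpleRNNCell unit test added in this PR, so treat the exact
+        // overloads shown as assumptions):
+        //     var cell = tf.keras.layers.SimpleRNNCell(10);
+        //     var rnn = tf.keras.layers.RNN(cell: cell);
+        //     var output = rnn.Apply(tf.random.normal((32, 10, 8)));  // shape: (32, 10)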
+
+        public override Tensors Apply(Tensors inputs, Tensors initial_states = null, bool training = false, IOptionalArgs? optional_args = null)
+        {
+            RnnOptionalArgs? rnn_optional_args = optional_args as RnnOptionalArgs;
+            if (optional_args is not null && rnn_optional_args is null)
+            {
+                throw new ArgumentException("The type of optional args should be `RnnOptionalArgs`.");
+            }
+            Tensors? constants = rnn_optional_args?.Constants;
+            (inputs, initial_states, constants) = RnnUtils.standardize_args(inputs, initial_states, constants, _num_constants);
+
+            if(initial_states is null && constants is null)
+            {
+                return base.Apply(inputs);
+            }
+
+            // TODO(Rinne): implement it.
+            throw new NotImplementedException();
+        }
+
+        protected (Tensors inputs, Tensors initial_state, Tensors constants) _process_inputs(Tensors inputs, Tensors initial_state, Tensors constants)
+        {
+            if (inputs.Length > 1)
+            {
+                if (_num_constants != 0)
+                {
+                    initial_state = new Tensors(inputs.Skip(1).ToArray());
+                }
+                else
+                {
+                    initial_state = new Tensors(inputs.Skip(1).SkipLast(_num_constants).ToArray());
+                    constants = new Tensors(inputs.TakeLast(_num_constants).ToArray());
+                }
+                if (len(initial_state) == 0)
+                    initial_state = null;
+                inputs = inputs[0];
+            }
+
+            if (_args.Stateful)
+            {
+                if (initial_state != null)
+                {
+                    var tmp = new Tensor[] { };
+                    foreach (var s in nest.flatten(States))
+                    {
+                        tmp.add(tf.math.count_nonzero(s.Single()));
+                    }
+                    var non_zero_count = tf.add_n(tmp);
+                    initial_state = tf.cond(non_zero_count > 0, States, initial_state);
+                    if ((int)non_zero_count.numpy() > 0)
+                    {
+                        initial_state = States;
+                    }
+                }
+                else
+                {
+                    initial_state = States;
+                }
+                //initial_state = Nest.MapStructure(v => tf.cast(v, this.), initial_state);
+            }
+            else if (initial_state is null)
+            {
+                initial_state = get_initial_state(inputs);
+            }
+
+            if (initial_state.Length != States.Length)
             {
-                cell.build(input_shape);
+                throw new ValueError($"Layer {this} expects {States.Length} state(s), " +
+                                     $"but it received {initial_state.Length} " +
+                                     $"initial state(s). Input received: {inputs}");
             }
+
+            return (inputs, initial_state, constants);
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        private void _validate_args_if_ragged(bool is_ragged_input, Tensors mask)
         {
-            return base.Call(inputs, state, training);
+            if (!is_ragged_input)
+            {
+                return;
+            }
+
+            if (_args.Unroll)
+            {
+                throw new ValueError("The input received contains RaggedTensors and does " +
+                "not support unrolling. Disable unrolling by passing " +
+                "`unroll=False` in the RNN Layer constructor.");
+            }
+            if (mask != null)
+            {
+                throw new ValueError($"The mask that was passed in was {mask}, which " +
+                "cannot be applied to RaggedTensor inputs. Please " +
+                "make sure that there is no mask injected by upstream " +
+                "layers.");
+            }
+
         }
 
-        private static RNNArgs PreConstruct(RNNArgs args)
+        protected void _maybe_reset_cell_dropout_mask(ILayer cell)
         {
-            if (args.Kwargs == null)
+            if (cell is DropoutRNNCellMixin CellDRCMixin)
             {
-                args.Kwargs = new Dictionary<string, object>();
+                CellDRCMixin.reset_dropout_mask();
+                CellDRCMixin.reset_recurrent_dropout_mask();
             }
+        }
 
+        private static RNNArgs PreConstruct(RNNArgs args)
+        {
             // If true, the output for masked timestep will be zeros, whereas in the
             // false case, output from previous timestep is returned for masked timestep.
-            var zeroOutputForMask = (bool)args.Kwargs.Get("zero_output_for_mask", false);
+            var zeroOutputForMask = args.ZeroOutputForMask;
 
             Shape input_shape;
-            var propIS = (Shape)args.Kwargs.Get("input_shape", null);
-            var propID = (int?)args.Kwargs.Get("input_dim", null);
-            var propIL = (int?)args.Kwargs.Get("input_length", null);
+            var propIS = args.InputShape;
+            var propID = args.InputDim;
+            var propIL = args.InputLength;
 
             if (propIS == null && (propID != null || propIL != null))
             {
                 input_shape = new Shape(
                     propIL ?? -1,
                     propID ?? -1);
-                args.Kwargs["input_shape"] = input_shape;
+                args.InputShape = input_shape;
             }
 
             return args;
         }
 
-        public RNN New(LayerRnnCell cell,
-            bool return_sequences = false,
-            bool return_state = false,
-            bool go_backwards = false,
-            bool stateful = false,
-            bool unroll = false,
-            bool time_major = false)
-                => new RNN(new RNNArgs
-                {
-                    Cell = cell,
-                    ReturnSequences = return_sequences,
-                    ReturnState = return_state,
-                    GoBackwards = go_backwards,
-                    Stateful = stateful,
-                    Unroll = unroll,
-                    TimeMajor = time_major
-                });
-
-        public RNN New(IList<RnnCell> cell,
-            bool return_sequences = false,
-            bool return_state = false,
-            bool go_backwards = false,
-            bool stateful = false,
-            bool unroll = false,
-            bool time_major = false)
-                => new RNN(new RNNArgs
-                {
-                    Cell = new StackedRNNCells(new StackedRNNCellsArgs { Cells = cell }),
-                    ReturnSequences = return_sequences,
-                    ReturnState = return_state,
-                    GoBackwards = go_backwards,
-                    Stateful = stateful,
-                    Unroll = unroll,
-                    TimeMajor = time_major
-                });
-
-
-        protected Tensor get_initial_state(Tensor inputs)
+        public Tensors __call__(Tensors inputs, Tensor state = null, Tensor training = null)
         {
-            return _generate_zero_filled_state_for_cell(null, null);
+            throw new NotImplementedException();
         }
 
-        Tensor _generate_zero_filled_state_for_cell(LSTMCell cell, Tensor batch_size)
-        {
-            throw new NotImplementedException("");
-        }
+        // It seems that the cell cannot be passed as an interface type here.
+        //public RNN New(IRnnArgCell cell,
+        //    bool return_sequences = false,
+        //    bool return_state = false,
+        //    bool go_backwards = false,
+        //    bool stateful = false,
+        //    bool unroll = false,
+        //    bool time_major = false)
+        //        => new RNN(new RNNArgs
+        //        {
+        //            Cell = cell,
+        //            ReturnSequences = return_sequences,
+        //            ReturnState = return_state,
+        //            GoBackwards = go_backwards,
+        //            Stateful = stateful,
+        //            Unroll = unroll,
+        //            TimeMajor = time_major
+        //        });
+
+        //public RNN New(List<IRnnArgCell> cell,
+        //    bool return_sequences = false,
+        //    bool return_state = false,
+        //    bool go_backwards = false,
+        //    bool stateful = false,
+        //    bool unroll = false,
+        //    bool time_major = false)
+        //        => new RNN(new RNNArgs
+        //        {
+        //            Cell = cell,
+        //            ReturnSequences = return_sequences,
+        //            ReturnState = return_state,
+        //            GoBackwards = go_backwards,
+        //            Stateful = stateful,
+        //            Unroll = unroll,
+        //            TimeMajor = time_major
+        //        });
+
 
-        // Check whether the state_size contains multiple states.
-        public static bool _is_multiple_state(object state_size)
+        protected Tensors get_initial_state(Tensors inputs)
         {
-            var myIndexerProperty = state_size.GetType().GetProperty("Item");
-            return myIndexerProperty != null
-                && myIndexerProperty.GetIndexParameters().Length == 1
-                && !(state_size.GetType() == typeof(Shape));
+            var input = inputs[0];
+            var input_shape = array_ops.shape(inputs);
+            var batch_size = _args.TimeMajor ? input_shape[1] : input_shape[0];
+            var dtype = input.dtype;
+
+            Tensors init_state = Cell.GetInitialState(null, batch_size, dtype);
+
+            return init_state;
         }
     }
 }
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs b/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs
new file mode 100644
index 000000000..018b17780
--- /dev/null
+++ b/src/TensorFlowNET.Keras/Layers/Rnn/RnnBase.cs
@@ -0,0 +1,13 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Keras.ArgsDefinition;
+using Tensorflow.Keras.Engine;
+
+namespace Tensorflow.Keras.Layers.Rnn
+{
+    public abstract class RnnBase: Layer
+    {
+        public RnnBase(LayerArgs args): base(args) { }
+    }
+}
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs
index 2d7aab70e..a22f31c7d 100644
--- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs
+++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNN.cs
@@ -10,23 +10,26 @@ namespace Tensorflow.Keras.Layers.Rnn
     public class SimpleRNN : RNN
     {
         SimpleRNNArgs args;
-        public SimpleRNN(SimpleRNNArgs args) : base(args)
+        public SimpleRNN(SimpleRNNArgs args) : base(CreateCellForArgs(args), args)
         {
             this.args = args;
         }
 
-        public override void build(KerasShapesWrapper input_shape)
+        private static SimpleRNNCell CreateCellForArgs(SimpleRNNArgs args)
         {
-            var single_shape = input_shape.ToSingleShape();
-            var input_dim = single_shape[-1];
-            _buildInputShape = input_shape;
-
-            kernel = add_weight("kernel", (single_shape[-1], args.Units),
-                initializer: args.KernelInitializer
-                //regularizer = self.kernel_regularizer,
-                //constraint = self.kernel_constraint,
-                //caching_device = default_caching_device,
-            );
+            return new SimpleRNNCell(new SimpleRNNCellArgs()
+            {
+                Units = args.Units,
+                Activation = args.Activation,
+                UseBias = args.UseBias,
+                KernelInitializer = args.KernelInitializer,
+                RecurrentInitializer = args.RecurrentInitializer,
+                BiasInitializer = args.BiasInitializer,
+                Dropout = args.Dropout,
+                RecurrentDropout = args.RecurrentDropout,
+                DType = args.DType,
+                Trainable = args.Trainable,
+            });
         }
     }
 }
\ No newline at end of file
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs
index 46061b211..c77f77790 100644
--- a/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs
+++ b/src/TensorFlowNET.Keras/Layers/Rnn/SimpleRNNCell.cs
@@ -4,47 +4,116 @@
 using Tensorflow.Keras.ArgsDefinition.Rnn;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Common.Types;
+using Tensorflow.Common.Extensions;
+using Tensorflow.Keras.Utils;
+using Tensorflow.Graphs;
 
 namespace Tensorflow.Keras.Layers.Rnn
 {
-    public class SimpleRNNCell : Layer
+    /// <summary>
+    /// Cell class for SimpleRNN.
+    /// See [the Keras RNN API guide](https://www.tensorflow.org/guide/keras/rnn)
+    /// for details about the usage of RNN API.
+    /// This class processes one step within the whole time sequence input, whereas
+    /// `tf.keras.layers.SimpleRNN` processes the whole sequence.
+    /// </summary>
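+    /// <remarks>
+    /// A minimal sketch of stepping the cell once (taken from the SimpleRNNCell
+    /// unit test added in this PR; treat the exact overloads as assumptions):
+    /// <code>
+    /// var cell = tf.keras.layers.SimpleRNNCell(64);
+    /// var h0 = new Tensors { tf.zeros(new Shape(4, 64)) };
+    /// var x = tf.random.normal((4, 100));
+    /// var (y, h1) = cell.Apply(inputs: x, states: h0);  // y: (4, 64)
+    /// </code>
+    /// </remarks>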
+    public class SimpleRNNCell : DropoutRNNCellMixin
     {
-        SimpleRNNArgs args;
-        IVariableV1 kernel;
-        IVariableV1 recurrent_kernel;
-        IVariableV1 bias;
+        SimpleRNNCellArgs _args;
+        IVariableV1 _kernel;
+        IVariableV1 _recurrent_kernel;
+        IVariableV1 _bias;
+        INestStructure<long> _state_size;
+        INestStructure<long> _output_size;
 
-        public SimpleRNNCell(SimpleRNNArgs args) : base(args)
+        public override INestStructure<long> StateSize => _state_size;
+        public override INestStructure<long> OutputSize => _output_size;
+        public override bool SupportOptionalArgs => false;
+
+        public SimpleRNNCell(SimpleRNNCellArgs args) : base(args)
         {
-            this.args = args;
+            this._args = args;
+            if (args.Units <= 0)
+            {
+                throw new ValueError(
+                            $"units must be a positive integer, got {args.Units}");
+            }
+            this._args.Dropout = Math.Min(1f, Math.Max(0f, this._args.Dropout));
+            this._args.RecurrentDropout = Math.Min(1f, Math.Max(0f, this._args.RecurrentDropout));
+            _state_size = new NestNode<long>(args.Units);
+            _output_size = new NestNode<long>(args.Units);
         }
 
         public override void build(KerasShapesWrapper input_shape)
         {
+            // TODO(Rinne): add the cache.
             var single_shape = input_shape.ToSingleShape();
             var input_dim = single_shape[-1];
 
-            kernel = add_weight("kernel", (single_shape[-1], args.Units),
-                initializer: args.KernelInitializer
+            _kernel = add_weight("kernel", (single_shape[-1], _args.Units),
+                initializer: _args.KernelInitializer
             );
 
-            recurrent_kernel = add_weight("recurrent_kernel", (args.Units, args.Units),
-                initializer: args.RecurrentInitializer
+            _recurrent_kernel = add_weight("recurrent_kernel", (_args.Units, _args.Units),
+                initializer: _args.RecurrentInitializer
             );
 
-            if (args.UseBias)
+            if (_args.UseBias)
             {
-                bias = add_weight("bias", (args.Units),
-                    initializer: args.BiasInitializer
+                _bias = add_weight("bias", (_args.Units),
+                    initializer: _args.BiasInitializer
                 );
             }
 
             built = true;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        // TODO(Rinne): revise the training param (with refactoring of the framework)
+        protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
-            return base.Call(inputs, state, training);
+            // TODO(Rinne): check if it will have multiple tensors when not nested.
+            Tensors prev_output = Nest.IsNested(states) ? new Tensors(states[0]) : states;
+            var dp_mask = get_dropout_mask_for_cell(inputs, training.Value);
+            var rec_dp_mask = get_recurrent_dropout_mask_for_cell(prev_output, training.Value);
+
+            Tensor h;
+            var ranks = inputs.rank;
+            if (dp_mask != null)
+            {
+                h = math_ops.matmul(math_ops.multiply(inputs.Single, dp_mask.Single), _kernel.AsTensor());
+            }
+            else
+            {
+                h = math_ops.matmul(inputs, _kernel.AsTensor());
+            }
+
+            if (_bias != null)
+            {
+                h = tf.nn.bias_add(h, _bias);
+            }
+
+            if (rec_dp_mask != null)
+            {
+                prev_output = math_ops.multiply(prev_output, rec_dp_mask);
+            }
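+            // SimpleRNN recurrence: output = activation(inputs * kernel + bias + prev_output * recurrent_kernel).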
+            Tensor output = h + math_ops.matmul(prev_output, _recurrent_kernel.AsTensor());
+
+            if (_args.Activation != null)
+            {
+                output = _args.Activation.Apply(output);
+            }
+            if (Nest.IsNested(states))
+            {
+                return new Nest<Tensor>(new List<Nest<Tensor>> { 
+                    new Nest<Tensor>(new List<Nest<Tensor>> { new Nest<Tensor>(output) }), new Nest<Tensor>(output) })
+                    .ToTensors();
+            }
+            else
+            {
+                return new Tensors(output, output);
+            }
         }
     }
 }
diff --git a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs
index 20962df1f..8799bfb23 100644
--- a/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs
+++ b/src/TensorFlowNET.Keras/Layers/Rnn/StackedRNNCells.cs
@@ -1,29 +1,27 @@
 using System;
-using System.Collections.Generic;
 using System.ComponentModel;
-using Tensorflow.Keras.ArgsDefinition;
+using System.Linq;
+using Tensorflow.Common.Extensions;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.ArgsDefinition.Rnn;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Keras.Saving;
+using Tensorflow.Keras.Utils;
 
 namespace Tensorflow.Keras.Layers.Rnn
 {
-    public class StackedRNNCells : Layer, RNNArgs.IRnnArgCell
+    public class StackedRNNCells : Layer, IRnnCell
     {
-        public IList<RnnCell> Cells { get; set; }
-        public bool reverse_state_order;
+        public IList<IRnnCell> Cells { get; set; }
+        public bool _reverse_state_order;
 
-        public StackedRNNCells(StackedRNNCellsArgs args) : base(args)
+        public StackedRNNCells(IEnumerable<IRnnCell> cells, StackedRNNCellsArgs args) : base(args)
         {
-            if (args.Kwargs == null)
-            {
-                args.Kwargs = new Dictionary<string, object>();
-            }
+            Cells = cells.ToList(); 
 
-            Cells = args.Cells;
-            reverse_state_order = (bool)args.Kwargs.Get("reverse_state_order", false);
+            _reverse_state_order = args.ReverseStateOrder;
 
-            if (reverse_state_order)
+            if (_reverse_state_order)
             {
                 throw new WarningException("reverse_state_order=True in StackedRNNCells will soon " +
                                            "be deprecated. Please update the code to work with the " +
@@ -32,109 +30,104 @@ public StackedRNNCells(StackedRNNCellsArgs args) : base(args)
             }
         }
 
-        public object state_size
+        public bool SupportOptionalArgs => false;
+
+        public INestStructure<long> StateSize
         {
-            get => throw new NotImplementedException();
-            //@property
-            //def state_size(self) :
-            //    return tuple(c.state_size for c in
-            //                 (self.cells[::- 1] if self.reverse_state_order else self.cells))
+            get
+            {
+                if (_reverse_state_order)
+                {
+                    var state_sizes = Cells.Reverse().Select(cell => cell.StateSize);
+                    return new Nest<long>(state_sizes);
+                }
+                else
+                {
+                    var state_sizes = Cells.Select(cell => cell.StateSize);
+                    return new Nest<long>(state_sizes);
+                }
+            }
         }
 
-        public object output_size
+        public INestStructure<long> OutputSize
         {
             get
             {
-                var lastCell = Cells[Cells.Count - 1];
-
-                if (lastCell.output_size != -1)
+                var lastCell = Cells.Last();
+                if(lastCell.OutputSize is not null)
                 {
-                    return lastCell.output_size;
+                    return lastCell.OutputSize;
                 }
-                else if (RNN._is_multiple_state(lastCell.state_size))
+                else if (RnnUtils.is_multiple_state(lastCell.StateSize))
                 {
-                    // return ((dynamic)Cells[-1].state_size)[0];
-                    throw new NotImplementedException("");
+                    return new NestNode<long>(lastCell.StateSize.Flatten().First());
                 }
                 else
                 {
-                    return Cells[-1].state_size;
+                    return lastCell.StateSize;
                 }
             }
         }
 
-        public object get_initial_state()
+        public Tensors GetInitialState(Tensors inputs = null, Tensor batch_size = null, TF_DataType dtype = TF_DataType.DtInvalid)
         {
-            throw new NotImplementedException();
-            //  def get_initial_state(self, inputs= None, batch_size= None, dtype= None) :
-            //    initial_states = []
-            //    for cell in self.cells[::- 1] if self.reverse_state_order else self.cells:
-            //      get_initial_state_fn = getattr(cell, 'get_initial_state', None)
-            //      if get_initial_state_fn:
-            //        initial_states.append(get_initial_state_fn(
-            //            inputs=inputs, batch_size=batch_size, dtype=dtype))
-            //      else:
-            //        initial_states.append(_generate_zero_filled_state_for_cell(
-            //            cell, inputs, batch_size, dtype))
-
-            //    return tuple(initial_states)
+            var cells = _reverse_state_order ? Cells.Reverse() : Cells;
+            List<Tensor> initial_states = new List<Tensor>();
+            foreach (var cell in cells)
+            {
+                initial_states.Add(cell.GetInitialState(inputs, batch_size, dtype));
+            }
+            return new Tensors(initial_states);
         }
 
-        public object call()
+        protected override Tensors Call(Tensors inputs, Tensors states = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
-            throw new NotImplementedException();
-            //  def call(self, inputs, states, constants= None, training= None, ** kwargs):
-            //    # Recover per-cell states.
-            //    state_size = (self.state_size[::- 1]
-            //                  if self.reverse_state_order else self.state_size)
-            //    nested_states = nest.pack_sequence_as(state_size, nest.flatten(states))
-
-            //    # Call the cells in order and store the returned states.
-            //    new_nested_states = []
-            //    for cell, states in zip(self.cells, nested_states) :
-            //      states = states if nest.is_nested(states) else [states]
-            //# TF cell does not wrap the state into list when there is only one state.
-            //    is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None
-            //      states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
-            //      if generic_utils.has_arg(cell.call, 'training'):
-            //        kwargs['training'] = training
-            //      else:
-            //        kwargs.pop('training', None)
-            //      # Use the __call__ function for callable objects, eg layers, so that it
-            //      # will have the proper name scopes for the ops, etc.
-            //      cell_call_fn = cell.__call__ if callable(cell) else cell.call
-            //      if generic_utils.has_arg(cell.call, 'constants'):
-            //        inputs, states = cell_call_fn(inputs, states,
-            //                                      constants= constants, ** kwargs)
-            //      else:
-            //        inputs, states = cell_call_fn(inputs, states, ** kwargs)
-            //      new_nested_states.append(states)
-
-            //    return inputs, nest.pack_sequence_as(state_size,
-            //                                         nest.flatten(new_nested_states))
+            // Recover per-cell states.
+            var state_size = _reverse_state_order ? new NestList<long>(StateSize.Flatten().Reverse()) : StateSize;
+            var nested_states = Nest.PackSequenceAs(state_size, Nest.Flatten(states).ToArray());
+
+            var new_nest_states = Nest<Tensor>.Empty;
+            // Call the cells in order and store the returned states.
+            foreach (var (cell, internal_states) in zip(Cells, nested_states))
+            {
+                RnnOptionalArgs? rnn_optional_args = optional_args as RnnOptionalArgs;
+                Tensors? constants = rnn_optional_args?.Constants;
+
+                Tensors new_states;
+                (inputs, new_states) = cell.Apply(inputs, internal_states, optional_args: new RnnOptionalArgs() { Constants = constants });
+
+                new_nest_states = new_nest_states.MergeWith(new_states);
+            }
+            return Tensors.FromNest((inputs, Nest.PackSequenceAs(state_size, Nest.Flatten(new_nest_states).ToArray())));
         }
 
-        public void build()
+        public override void build(KerasShapesWrapper input_shape)
         {
-            throw new NotImplementedException();
-            //  @tf_utils.shape_type_conversion
-            //  def build(self, input_shape) :
-            //    if isinstance(input_shape, list) :
-            //      input_shape = input_shape[0]
-            //    for cell in self.cells:
-            //      if isinstance(cell, Layer) and not cell.built:
-            //        with K.name_scope(cell.name):
-            //          cell.build(input_shape)
-            //          cell.built = True
-            //      if getattr(cell, 'output_size', None) is not None:
-            //        output_dim = cell.output_size
-            //      elif _is_multiple_state(cell.state_size) :
-            //        output_dim = cell.state_size[0]
-            //      else:
-            //        output_dim = cell.state_size
-            //      input_shape = tuple([input_shape[0]] +
-            //                          tensor_shape.TensorShape(output_dim).as_list())
-            //    self.built = True
+            var shape = input_shape.ToSingleShape();
+            foreach(var cell in Cells)
+            {
+                if(cell is Layer layer && !layer.Built)
+                {
+                    // the name scope is ignored here.
+                    layer.build(shape);
+                    layer.Built = true;
+                }
+                INestStructure<long> output_dim;
+                if(cell.OutputSize is not null)
+                {
+                    output_dim = cell.OutputSize;
+                }
+                else if (RnnUtils.is_multiple_state(cell.StateSize))
+                {
+                    output_dim = new NestNode<long>(cell.StateSize.Flatten().First());
+                }
+                else
+                {
+                    output_dim = cell.StateSize;
+                }
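+                // The next cell in the stack receives an input shape of [batch, output_dim].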
+                shape = new Shape(new long[] { shape.dims[0] }.Concat(output_dim.Flatten()).ToArray());
+            }
+            this.Built = true;
         }
 
         public override IKerasConfig get_config()
diff --git a/src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs b/src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs
index 1ac4a277c..6dfec3196 100644
--- a/src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs
+++ b/src/TensorFlowNET.Keras/Layers/TensorFlowOpLayer.cs
@@ -10,6 +10,7 @@
 using static Tensorflow.Binding;
 using Tensorflow.Functions;
 using System.Threading;
+using Tensorflow.Common.Types;
 
 namespace Tensorflow.Keras.Layers
 {
@@ -34,7 +35,7 @@ public TensorFlowOpLayer(TensorFlowOpLayerArgs args)
             built = true;
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optional_args = null)
         {
             if (tf.Context.executing_eagerly())
                 return DeFunCall(inputs);
diff --git a/src/TensorFlowNET.Keras/Metrics/metrics_utils.cs b/src/TensorFlowNET.Keras/Metrics/metrics_utils.cs
index be6a49ec5..3c2f8a7be 100644
--- a/src/TensorFlowNET.Keras/Metrics/metrics_utils.cs
+++ b/src/TensorFlowNET.Keras/Metrics/metrics_utils.cs
@@ -304,7 +304,7 @@ private static Tensor _filter_top_k(Tensor x, int k)
         var NEG_INF = -1e10;
         var (_, top_k_idx) = tf.math.top_k(x, k, sorted: false);
         var top_k_mask = tf.reduce_sum(
-            tf.one_hot(top_k_idx, (int)x.shape[-1], axis: -1), axis: -2);
+            tf.one_hot(top_k_idx.Single, (int)x.shape[-1], axis: -1), axis: -2);
         return x * top_k_mask + NEG_INF * (1 - top_k_mask);
     }
 }
diff --git a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs
index fa19987b1..4acae4265 100644
--- a/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs
+++ b/src/TensorFlowNET.Keras/Preprocessings/Preprocessing.image_dataset_from_directory.cs
@@ -129,7 +129,7 @@ public IDatasetV2 timeseries_dataset_from_array(Tensor data, int sequence_length
             var indices = z.map(m =>
             {
                 var (i, positions) = m;
-                return tf.range(positions[i], positions[i] + sequence_length_tensor * sampling_rate_tensor, sampling_rate_tensor);
+                return tf.range(positions.Single[i], positions.Single[i] + sequence_length_tensor * sampling_rate_tensor, sampling_rate_tensor);
             }, num_parallel_calls: -1);
             var dataset = sequences_from_indices(data, indices, start_index, end_index);
 
diff --git a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs
index a26879e0c..396ad20eb 100644
--- a/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs
+++ b/src/TensorFlowNET.Keras/Saving/KerasObjectLoader.cs
@@ -8,7 +8,7 @@
 using System.Linq;
 using System.Reflection;
 using System.Text.RegularExpressions;
-using Tensorflow.Extensions;
+using Tensorflow.Common.Extensions;
 using Tensorflow.Framework.Models;
 using Tensorflow.Keras.ArgsDefinition;
 using Tensorflow.Keras.Engine;
diff --git a/src/TensorFlowNET.Keras/Utils/RnnUtils.cs b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs
new file mode 100644
index 000000000..e8700c1f2
--- /dev/null
+++ b/src/TensorFlowNET.Keras/Utils/RnnUtils.cs
@@ -0,0 +1,103 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Text;
+using Tensorflow.Common.Types;
+using Tensorflow.Keras.Layers.Rnn;
+using Tensorflow.Common.Extensions;
+
+namespace Tensorflow.Keras.Utils
+{
+    internal static class RnnUtils
+    {
+        internal static Tensors generate_zero_filled_state(Tensor batch_size_tensor, INestStructure<long> state_size, TF_DataType dtype)
+        {
+            Func<long, Tensor> create_zeros = (unnested_state_size) =>
+            {
+                var flat_dims = new Shape(unnested_state_size).dims;
+                var init_state_size = new Tensor[] { batch_size_tensor }.
+                    Concat(flat_dims.Select(x => tf.constant(x, dtypes.int32))).ToArray();
+                return array_ops.zeros(init_state_size, dtype: dtype);
+            };
+
+            // TODO(Rinne): map structure with nested tensors.
+            if(state_size.TotalNestedCount > 1)
+            {
+                return new Tensors(state_size.Flatten().Select(s => create_zeros(s)).ToArray());
+            }
+            else
+            {
+                return create_zeros(state_size.Flatten().First());
+            }
+
+        }
+
+        internal static Tensors generate_zero_filled_state_for_cell(IRnnCell cell, Tensors inputs, Tensor batch_size, TF_DataType dtype)
+        {
+            if (inputs is not null)
+            {
+                batch_size = array_ops.shape(inputs)[0];
+                dtype = inputs.dtype;
+            }
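+            // For example, a cell whose StateSize flattens to a single value of 4,
+            // combined with a batch size of 32, yields one zeros tensor of shape
+            // (32, 4); a nested state size yields one zeros tensor per flattened entry.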
+            return generate_zero_filled_state(batch_size, cell.StateSize, dtype);
+        }
+
+        /// <summary>
+        /// Standardizes `__call__` to a single list of tensor inputs.
+        /// 
+        /// When running a model loaded from a file, the input tensors
+        /// `initial_state` and `constants` can be passed to `RNN.__call__()` as part
+        /// of `inputs` instead of by the dedicated keyword arguments. This method
+        /// makes sure the arguments are separated and that `initial_state` and
+        /// `constants` are lists of tensors (or None).
+        /// </summary>
+        /// <param name="inputs">Tensor or list/tuple of tensors, which may include constants
+        /// and initial states. In that case `num_constants` must be specified.</param>
+        /// <param name="initial_state">Tensor or list of tensors or None, initial states.</param>
+        /// <param name="constants">Tensor or list of tensors or None, constant tensors.</param>
+        /// <param name="num_constants">Expected number of constants (if constants are passed as
+        /// part of the `inputs` list).</param>
+        /// <returns></returns>
+        internal static (Tensors, Tensors, Tensors) standardize_args(Tensors inputs, Tensors initial_state, Tensors constants, int num_constants)
+        {
+            if(inputs.Length > 1)
+            {
+                // There are several situations here:
+                // In the graph mode, __call__ will be only called once. The initial_state
+                // and constants could be in inputs (from file loading).
+                // In the eager mode, __call__ will be called twice: once during
+                // rnn_layer(inputs=input_t, constants=c_t, ...), and a second time during
+                // model.fit/train_on_batch/predict with real np data. In the second case,
+                // the inputs will contain initial_state and constants as eager tensors.
+                //
+                // For either case, the real input is the first item in the list, which
+                // could be a nested structure itself. Then followed by initial_states, which
+                // could be a list of items, or list of list if the initial_state is complex
+                // structure, and finally followed by constants which is a flat list.
+                Debug.Assert(initial_state is null && constants is null);
+                if(num_constants > 0)
+                {
+                    constants = inputs.TakeLast(num_constants).ToArray().ToTensors();
+                    inputs = inputs.SkipLast(num_constants).ToArray().ToTensors();
+                }
+                if(inputs.Length > 1)
+                {
+                    initial_state = inputs.Skip(1).ToArray().ToTensors();
+                    inputs = inputs.Take(1).ToArray().ToTensors();
+                }
+            }
+
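+            // For example, inputs = [x, h0, c0] with num_constants = 1 is separated
+            // into inputs [x], initial_state [h0] and constants [c0].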
+            return (inputs, initial_state, constants);
+        }
+
+        /// <summary>
+        /// Check whether the state_size contains multiple states.
+        /// </summary>
+        /// <param name="state_size"></param>
+        /// <returns></returns>
+        public static bool is_multiple_state(INestStructure<long> state_size)
+        {
+            return state_size.TotalNestedCount > 1;
+        }
+    }
+}
diff --git a/src/TensorflowNET.Hub/KerasLayer.cs b/src/TensorflowNET.Hub/KerasLayer.cs
index b9ca949bc..20d9851b1 100644
--- a/src/TensorflowNET.Hub/KerasLayer.cs
+++ b/src/TensorflowNET.Hub/KerasLayer.cs
@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.Linq;
+using Tensorflow.Common.Types;
 using Tensorflow.Keras.Engine;
 using Tensorflow.Train;
 using Tensorflow.Training;
@@ -89,7 +90,7 @@ private void _setup_layer(bool trainable = false)
             }
         }
 
-        protected override Tensors Call(Tensors inputs, Tensor state = null, bool? training = null)
+        protected override Tensors Call(Tensors inputs, Tensors state = null, bool? training = null, IOptionalArgs? optionalArgs = null)
         {
             _check_trainability();
 
diff --git a/test/TensorFlowNET.Keras.UnitTest/Callbacks/EarlystoppingTest.cs b/test/TensorFlowNET.Keras.UnitTest/Callbacks/EarlystoppingTest.cs
index ac5ba15ed..29648790f 100644
--- a/test/TensorFlowNET.Keras.UnitTest/Callbacks/EarlystoppingTest.cs
+++ b/test/TensorFlowNET.Keras.UnitTest/Callbacks/EarlystoppingTest.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using Tensorflow.Keras.Callbacks;
 using Tensorflow.Keras.Engine;
+using Tensorflow.NumPy;
 using static Tensorflow.KerasApi;
 
 
@@ -18,7 +19,7 @@ public void Earlystopping()
             var layers = keras.layers;
             var model = keras.Sequential(new List<ILayer>
             {
-                layers.Rescaling(1.0f / 255, input_shape: (32, 32, 3)),
+                layers.Rescaling(1.0f / 255, input_shape: (28, 28, 1)),
                 layers.Conv2D(32, 3, padding: "same", activation: keras.activations.Relu),
                 layers.MaxPooling2D(),
                 layers.Flatten(),
@@ -36,8 +37,20 @@ public void Earlystopping()
             var num_epochs = 3;
             var batch_size = 8;
 
-            var ((x_train, y_train), (x_test, y_test)) = keras.datasets.cifar10.load_data();
-            x_train = x_train / 255.0f;
+            var data_loader = new MnistModelLoader();
+
+            var dataset = data_loader.LoadAsync(new ModelLoadSetting
+            {
+                TrainDir = "mnist",
+                OneHot = false,
+                ValidationSize = 59900,
+            }).Result;
+
+            NDArray x1 = np.reshape(dataset.Train.Data, (dataset.Train.Data.shape[0], 28, 28, 1));
+            NDArray x2 = x1;
+
+            var x = new NDArray[] { x1, x2 };
+
             // define a CallbackParams first; the parameters you pass should at least contain Model and Epochs.
             CallbackParams callback_parameters = new CallbackParams
             {
@@ -47,10 +60,8 @@ public void Earlystopping()
             // define your earlystop
             ICallback earlystop = new EarlyStopping(callback_parameters, "accuracy");
             // define a callback list, then add the earlystopping to it.
-            var callbacks = new List<ICallback>();
-            callbacks.add(earlystop);
-
-            model.fit(x_train[new Slice(0, 2000)], y_train[new Slice(0, 2000)], batch_size, num_epochs, callbacks: callbacks);
+            var callbacks = new List<ICallback>{ earlystop};
+            model.fit(x, dataset.Train.Labels, batch_size, num_epochs, callbacks: callbacks);
         }
 
     }
diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs
index 3de337469..f4980b82d 100644
--- a/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs
+++ b/test/TensorFlowNET.Keras.UnitTest/Layers/LayersTest.cs
@@ -144,17 +144,6 @@ public void EinsumDense()
             Assert.AreEqual(expected_output, actual_output);
         }
 
-        [TestMethod, Ignore("WIP")]
-        public void SimpleRNN()
-        {
-            var inputs = np.arange(6 * 10 * 8).reshape((6, 10, 8)).astype(np.float32);
-            /*var simple_rnn = keras.layers.SimpleRNN(4);
-            var output = simple_rnn.Apply(inputs);
-            Assert.AreEqual((32, 4), output.shape);*/
-            var simple_rnn = tf.keras.layers.SimpleRNN(4, return_sequences: true, return_state: true);
-            var (whole_sequence_output, final_state) = simple_rnn.Apply(inputs);
-        }
-
         [TestMethod]
         public void Resizing()
         {
diff --git a/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs
new file mode 100644
index 000000000..8eeee7a88
--- /dev/null
+++ b/test/TensorFlowNET.Keras.UnitTest/Layers/Rnn.Test.cs
@@ -0,0 +1,136 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Tensorflow.Common.Types;
+using Tensorflow.Keras.Engine;
+using Tensorflow.Keras.Layers.Rnn;
+using Tensorflow.Keras.Saving;
+using Tensorflow.NumPy;
+using Tensorflow.Train;
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+
+namespace Tensorflow.Keras.UnitTest.Layers
+{
+    [TestClass]
+    public class Rnn
+    {
+        [TestMethod]
+        public void SimpleRNNCell()
+        {
+            var cell = tf.keras.layers.SimpleRNNCell(64, dropout: 0.5f, recurrent_dropout: 0.5f);
+            var h0 = new Tensors { tf.zeros(new Shape(4, 64)) };
+            var x = tf.random.normal((4, 100));
+            var (y, h1) = cell.Apply(inputs: x, states: h0);
+            var h2 = h1;
+            Assert.AreEqual((4, 64), y.shape);
+            Assert.AreEqual((4, 64), h2[0].shape);
+        }
+
+        [TestMethod]
+        public void StackedRNNCell()
+        {
+            var inputs = tf.ones((32, 10));
+            var states = new Tensors { tf.zeros((32, 4)), tf.zeros((32, 5)) };
+            var cells = new IRnnCell[] { tf.keras.layers.SimpleRNNCell(4), tf.keras.layers.SimpleRNNCell(5) };
+            var stackedRNNCell = tf.keras.layers.StackedRNNCells(cells);
+            var (output, state) = stackedRNNCell.Apply(inputs, states);
+            Console.WriteLine(output);
+            Console.WriteLine(state.shape);
+            Assert.AreEqual((32, 5), output.shape);
+            Assert.AreEqual((32, 4), state[0].shape);
+        }
+
+        [TestMethod]
+        public void LSTMCell()
+        {
+            var inputs = tf.ones((2, 100));
+            var states = new Tensors { tf.zeros((2, 4)), tf.zeros((2, 4)) };
+            var rnn = tf.keras.layers.LSTMCell(4);
+            var (output, new_states) = rnn.Apply(inputs, states);
+            Assert.AreEqual((2, 4), output.shape);
+            Assert.AreEqual((2, 4), new_states[0].shape);
+        }
+
+        [TestMethod] 
+        public void TrainLSTMWithMnist()
+        {
+            var input = keras.Input((784));
+            var x = keras.layers.Reshape((28, 28)).Apply(input);
+            x = keras.layers.LSTM(50, return_sequences: true).Apply(x);
+            x = keras.layers.LSTM(100).Apply(x);
+            var output = keras.layers.Dense(10, activation: "softmax").Apply(x);
+
+            var model = keras.Model(input, output);
+            model.summary();
+            model.compile(keras.optimizers.Adam(), keras.losses.CategoricalCrossentropy(), new string[] { "accuracy" });
+
+            var data_loader = new MnistModelLoader();
+            var dataset = data_loader.LoadAsync(new ModelLoadSetting
+            {
+                TrainDir = "mnist",
+                OneHot = true,
+                ValidationSize = 55000,
+            }).Result;
+
+            model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 1);
+        }
+
+        [TestMethod]
+        public void SimpleRNN()
+        {
+            var input = keras.Input((784));
+            var x = keras.layers.Reshape((28, 28)).Apply(input);
+            x = keras.layers.SimpleRNN(10).Apply(x);
+            var output = keras.layers.Dense(10, activation: "softmax").Apply(x);
+
+            var model = keras.Model(input, output);
+            model.summary();
+            model.compile(keras.optimizers.Adam(), keras.losses.CategoricalCrossentropy(), new string[] { "accuracy" });
+
+            var data_loader = new MnistModelLoader();
+            var dataset = data_loader.LoadAsync(new ModelLoadSetting
+            {
+                TrainDir = "mnist",
+                OneHot = false,
+                ValidationSize = 58000,
+            }).Result;
+
+            model.fit(dataset.Train.Data, dataset.Train.Labels, batch_size: 16, epochs: 2);
+        }
+
+        [TestMethod]
+        public void RNNForSimpleRNNCell()
+        {
+            var inputs = tf.random.normal((32, 10, 8));
+            var cell = tf.keras.layers.SimpleRNNCell(10, dropout: 0.5f, recurrent_dropout: 0.5f);
+            var rnn = tf.keras.layers.RNN(cell: cell);
+            var output = rnn.Apply(inputs);
+            Assert.AreEqual((32, 10), output.shape);
+        }
+
+        [TestMethod]
+        public void RNNForStackedRNNCell()
+        {
+            var inputs = tf.random.normal((32, 10, 8));
+            var cells = new IRnnCell[] { tf.keras.layers.SimpleRNNCell(4), tf.keras.layers.SimpleRNNCell(5) };
+            var stackedRNNCell = tf.keras.layers.StackedRNNCells(cells);
+            var rnn = tf.keras.layers.RNN(cell: stackedRNNCell);
+            var output = rnn.Apply(inputs);
+            Assert.AreEqual((32, 5), output.shape);
+        }
+
+        [TestMethod]
+        public void RNNForLSTMCell()
+        {
+            var inputs = tf.ones((5, 10, 8));
+            var rnn = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(4));
+            var output = rnn.Apply(inputs);
+            Console.WriteLine($"output: {output}");
+            Assert.AreEqual((5, 4), output.shape);
+        }
+    }
+}
diff --git a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
index 6d7182e09..23dc1d44d 100644
--- a/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
+++ b/test/TensorFlowNET.UnitTest/ManagedAPI/ControlFlowApiTest.cs
@@ -28,8 +28,8 @@ public void WhileLoopTwoInputsEagerMode()
 
             var i = tf.constant(2);
             var j = tf.constant(3);
-            Func<Tensor[], Tensor> c = (x) => tf.less(x[0] + x[1], 10);
-            Func<Tensor[], Tensor[]> b = (x) => new[] { tf.add(x[0], 1), tf.add(x[1], 1) };
+            Func<Tensors, Tensor> c = (x) => tf.less(x[0] + x[1], 10);
+            Func<Tensors, Tensors> b = (x) => new[] { tf.add(x[0], 1), tf.add(x[1], 1) };
             var r = tf.while_loop(c, b, new[] { i, j });
             Assert.AreEqual(5, (int)r[0]);
             Assert.AreEqual(6, (int)r[1]);
diff --git a/tools/TensorFlowNET.Console/SimpleRnnTest.cs b/tools/TensorFlowNET.Console/SimpleRnnTest.cs
index 9769eb655..ae6ebb8a8 100644
--- a/tools/TensorFlowNET.Console/SimpleRnnTest.cs
+++ b/tools/TensorFlowNET.Console/SimpleRnnTest.cs
@@ -20,7 +20,7 @@ public void Run()
 
             // whole_sequence_output has shape `[32, 10, 4]`.
             // final_state has shape `[32, 4]`.
-            var (whole_sequence_output, final_state) = simple_rnn.Apply(inputs);
+            var (whole_sequence_output, final_states) = simple_rnn.Apply(inputs);
         }
     }
 }
diff --git a/tools/Tensorflow.CodeGen/FunctionGenerator.cs b/tools/Tensorflow.CodeGen/FunctionGenerator.cs
index 93f9ea4e9..f3687d6b4 100644
--- a/tools/Tensorflow.CodeGen/FunctionGenerator.cs
+++ b/tools/Tensorflow.CodeGen/FunctionGenerator.cs
@@ -21,7 +21,8 @@ public void AppendFunction(OpDef op, StringBuilder sb)
             {
                 sb.Append("Operation ");
             }
-            else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr))
+            else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)
+                && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr))
             {
                 sb.Append("Tensor ");
             }
@@ -70,7 +71,8 @@ public void AppendFunction(OpDef op, StringBuilder sb)
                 {
                     sb.AppendLine("return null;");
                 }
-                else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr))
+                else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)
+                    && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr))
                 {
                     sb.AppendLine("return _fast_path_result[0];");
                 }
@@ -81,6 +83,14 @@ public void AppendFunction(OpDef op, StringBuilder sb)
 
                 sb.AppendLine("}"); // try
 
+                sb.Append("catch(NotOkStatusException ex1)\n{\n");
+                sb.AppendLine("throw ex1;");
+                sb.AppendLine("}"); // catch
+
+                sb.Append("catch(InvalidArgumentError ex2)\n{\n");
+                sb.AppendLine("throw ex2;");
+                sb.AppendLine("}"); // catch
+
                 sb.Append("catch(Exception)\n{\n");
                 sb.AppendLine("}"); // catch
 
@@ -149,7 +159,8 @@ public void AppendFunction(OpDef op, StringBuilder sb)
             {
                 sb.AppendLine("return _op;");
             }
-            else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr))
+            else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)
+                && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr))
             {
                 sb.AppendLine("return _result[0];");
             }
@@ -174,7 +185,7 @@ public void AppendArgs(OpDef op, StringBuilder sb)
                 {
                     argName = $"{argName}_";
                 }
-                if (!string.IsNullOrEmpty(arg.NumberAttr))
+                if (!string.IsNullOrEmpty(arg.NumberAttr) || !string.IsNullOrEmpty(arg.TypeListAttr))
                 {
                     sb.Append($"Tensors {argName}, ");
                 }
@@ -273,7 +284,8 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb)
             {
                 sb.Append("Operation ");
             }
-            else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr))
+            else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)
+                && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr))
             {
                 sb.Append("Tensor ");
             }
@@ -366,6 +378,13 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb)
                         sb.Append($"\"{attr.Name}\", {attrRealName}, ");
                     }
                 }
+                else if(attr.Type == "list(type)")
+                {
+                    if (op.InputArg.Any(x => x.TypeListAttr == attr.Name))
+                    {
+                        continue;
+                    }
+                }
                 else if(attr.Type == "int" && op.InputArg.Any(x => x.NumberAttr == attr.Name))
                 {
                     bool found = false;
@@ -408,7 +427,8 @@ public void AppendEagerFallbackDefinition(OpDef op, StringBuilder sb)
             {
                 sb.AppendLine("return null;");
             }
-            else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr))
+            else if (outputArgsCount == 1 && string.IsNullOrEmpty(op.OutputArg[0].NumberAttr)
+                && string.IsNullOrEmpty(op.OutputArg[0].TypeListAttr))
             {
                 sb.AppendLine("return _result[0];");
             }
diff --git a/tools/Tensorflow.CodeGen/GenOpsWriter.cs b/tools/Tensorflow.CodeGen/GenOpsWriter.cs
index 7601acdbb..9eefca07e 100644
--- a/tools/Tensorflow.CodeGen/GenOpsWriter.cs
+++ b/tools/Tensorflow.CodeGen/GenOpsWriter.cs
@@ -39,6 +39,7 @@ public void WriteAll()
                 // Add commonly used namespaces.
                 sb.AppendLine("using Tensorflow.Eager;");
                 sb.AppendLine("using Tensorflow.Contexts;");
+                sb.AppendLine("using Tensorflow.Exceptions;");
                 sb.AppendLine("using static Tensorflow.Binding;");
                 sb.AppendLine();
 
diff --git a/tools/Tensorflow.CodeGen/OpClassifier.cs b/tools/Tensorflow.CodeGen/OpClassifier.cs
index eaad3fec8..2d22c5d22 100644
--- a/tools/Tensorflow.CodeGen/OpClassifier.cs
+++ b/tools/Tensorflow.CodeGen/OpClassifier.cs
@@ -9,7 +9,7 @@ namespace Tensorflow.CodeGen
 {
     public class OpClassifier
     {
-        private static readonly string _filenamePattern = @"^gen_[a-z]*_ops.py$";
+        private static readonly string _filenamePattern = @"^gen_[a-z_]*_ops.py$";
         private static readonly string _pythonFunctionPattern = @"def\s+(\w+\d*\w*)\((?:\s*\w+\s*(?:=\s*[\S]*)*,\s*)*\s*name=None\):";
         private Dictionary<string, HashSet<string>> _opSet = new();
         public Dictionary<string, HashSet<string>> OpSet => _opSet;
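
The OpClassifier change above widens the filename filter so generated-op modules with multi-word names are classified as well; the old character class `[a-z]*` could not span an underscore. An illustrative check (the file names are standard TensorFlow generated modules; the snippet itself is not part of the PR):

```csharp
using System;
using System.Text.RegularExpressions;

// Illustration of the OpClassifier pattern change; not part of the diff.
var oldPattern = new Regex(@"^gen_[a-z]*_ops.py$");
var newPattern = new Regex(@"^gen_[a-z_]*_ops.py$");

Console.WriteLine(oldPattern.IsMatch("gen_math_ops.py"));          // True  - single-word modules already matched
Console.WriteLine(oldPattern.IsMatch("gen_control_flow_ops.py"));  // False - the underscore broke the old class
Console.WriteLine(newPattern.IsMatch("gen_control_flow_ops.py"));  // True  - now picked up by the classifier
```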
diff --git a/tools/Tensorflow.CodeGen/Program.cs b/tools/Tensorflow.CodeGen/Program.cs
index f9d44ce83..cea52e0b4 100644
--- a/tools/Tensorflow.CodeGen/Program.cs
+++ b/tools/Tensorflow.CodeGen/Program.cs
@@ -5,7 +5,7 @@
 using System.Xml.Linq;
 using Tensorflow.CodeGen;
 
-GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops",
+GenOpsWriter writer = new(@"D:\development\tf.net\gen_ops_v2",
     @"D:\Apps\miniconda3\envs\tf2.11\Lib\site-packages\tensorflow\python\ops",
     @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\api_def\base_api",
     @"D:\development\tf.net\tensorflow-2.11.0\tensorflow\core\ops\ops.pbtxt");
diff --git a/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj
index 4cb3368d0..03195e6ac 100644
--- a/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj
+++ b/tools/Tensorflow.CodeGen/Tensorflow.CodeGen.csproj
@@ -9,7 +9,7 @@
 
   <ItemGroup>
     <PackageReference Include="Microsoft.CodeAnalysis.CSharp.Scripting" Version="4.6.0-1.final" />
-    <PackageReference Include="Protobuf.Text" Version="0.7.0" />
+    <PackageReference Include="Protobuf.Text" Version="0.7.1" />
   </ItemGroup>
 
   <ItemGroup>
diff --git a/tools/Tensorflow.CodeGen/Utils.cs b/tools/Tensorflow.CodeGen/Utils.cs
index d3f30d9f2..6c69b7f95 100644
--- a/tools/Tensorflow.CodeGen/Utils.cs
+++ b/tools/Tensorflow.CodeGen/Utils.cs
@@ -155,6 +155,10 @@ public static OpList ReadAllOpDefs(string path)
                 }
                 else if (attr.Type == "list(type)")
                 {
+                    if(op.InputArg.Any(x => x.TypeListAttr == attr.Name))
+                    {
+                        continue;
+                    }
                     if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.Type)
                     {
                         List<TF_DataType> values = new();
@@ -174,10 +178,25 @@ public static OpList ReadAllOpDefs(string path)
                 else if (attr.Type == "list(shape)")
                 {
                     res.Add((attr.Name, "Shape[]", "NOVALUE"));
+                    if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List)
+                    {
+                        List<string> exps = new();
+                        foreach (var value in attr.DefaultValue.List.Shape)
+                        {
+                            exps.Add($"new Shape({string.Join(", ", value.Dim.Select(x => x.Size))})");
+                        }
+                        string expression = "new Shape[]{" + $"{string.Join(", ", exps)}" + "}";
+                        dynamicDefaultValues[attr.Name] = expression;
+                        res.Add((attr.Name, "string[]", $"null"));
+                    }
+                    else
+                    {
+                        res.Add((attr.Name, "string[]", "NOVALUE"));
+                    }
                 }
                 else if (attr.Type == "list(string)")
                 {
-                    if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.S)
+                    if (attr.DefaultValue is not null && attr.DefaultValue.ValueCase == AttrValue.ValueOneofCase.List)
                     {
                         List<string> values = new();
                         foreach (var value in attr.DefaultValue.List.S)
@@ -231,11 +250,11 @@ public static OpList ReadAllOpDefs(string path)
                 }
                 else if (attr.Type == "func")
                 {
-                    res.Add((attr.Name, "Func<Tensors, Tensors>", "NOVALUE"));
+                    res.Add((attr.Name, "object", "NOVALUE"));
                 }
                 else if (attr.Type == "list(func)")
                 {
-                    res.Add((attr.Name, "Func<Tensors, Tensors>[]", "NOVALUE"));
+                    res.Add((attr.Name, "object[]", "NOVALUE"));
                 }
                 else if (attr.Type == "tensor")
                 {
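
For context on the Utils.cs hunks: when a list(shape) attribute carries a default in the op-def proto, the generator now records a C# initializer expression in dynamicDefaultValues so the emitted wrapper can substitute it when the caller passes null. A standalone sketch of that string construction, using made-up sample dimensions in place of attr.DefaultValue.List.Shape:

```csharp
using System;
using System.Linq;

// Sketch only: mirrors the expression-building loop added to ReadAllOpDefs,
// with hard-coded sample dimensions standing in for the proto default.
long[][] defaultShapes = { new long[] { 2, 3 }, new long[] { 4 } };

var exps = defaultShapes.Select(dims => $"new Shape({string.Join(", ", dims)})");
string expression = "new Shape[]{" + string.Join(", ", exps) + "}";

Console.WriteLine(expression);
// Prints: new Shape[]{new Shape(2, 3), new Shape(4)}
```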