{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "Tce3stUlHN0L" }, "source": [ "##### Copyright 2020 The TensorFlow Authors." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "cellView": "form", "execution": { "iopub.execute_input": "2022-04-27T09:10:30.993660Z", "iopub.status.busy": "2022-04-27T09:10:30.993128Z", "iopub.status.idle": "2022-04-27T09:10:30.996797Z", "shell.execute_reply": "2022-04-27T09:10:30.996333Z" }, "id": "tuOe1ymfHZPu" }, "outputs": [], "source": [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", "# you may not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "# https://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License." ] }, { "cell_type": "markdown", "metadata": { "id": "23R0Z9RojXYW" }, "source": [ "# MLMD Model Card Toolkit Demo" ] }, { "cell_type": "markdown", "metadata": { "id": "MfBg1C5NB3X0" }, "source": [ "\n", " \n", " \n", " \n", " \n", "
\n", " View on TensorFlow.org\n", " \n", " Run in Google Colab\n", " \n", " View on GitHub\n", " \n", " Download notebook\n", "
" ] }, { "cell_type": "markdown", "metadata": { "id": "sfSQ-kX-MLEr" }, "source": [ "## Background\n", "\n", "This notebook demonstrates how to generate a model card using the Model Card Toolkit with MLMD and TFX pipeline in a Jupyter/Colab environment. You can learn more about model cards at https://modelcards.withgoogle.com/about. \n", "\n" ] }, { "cell_type": "markdown", "metadata": { "id": "2GivNBNYjb3b" }, "source": [ "## Setup\n", "We first need to a) install and import the necessary packages, and b) download the data." ] }, { "cell_type": "markdown", "metadata": { "id": "Fmgi8ZvQkScg" }, "source": [ "### Upgrade to Pip 21 (or later) and Install Model Card Toolkit" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:31.000143Z", "iopub.status.busy": "2022-04-27T09:10:30.999666Z", "iopub.status.idle": "2022-04-27T09:10:36.810951Z", "shell.execute_reply": "2022-04-27T09:10:36.810262Z" }, "id": "NYtxxdriz5VO" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: pip==21.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (21.3)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\u001b[33mWARNING: You are using pip version 21.3; however, version 22.0.4 is available.\r\n", "You should consider upgrading via the '/tmpfs/src/tf_docs_env/bin/python -m pip install --upgrade pip' command.\u001b[0m\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: model-card-toolkit in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (1.3.1)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: semantic-version<3,>=2.8.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (2.9.0)\r\n", "Requirement already satisfied: tensorflow-metadata<1.6.0,>=1.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: tfx<1.6.0,>=1.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: absl-py<1.1,>=0.9 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (0.12.0)\r\n", "Requirement already satisfied: ml-metadata<1.6.0,>=1.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: matplotlib<4,>=3.2.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (3.5.1)\r\n", "Requirement already satisfied: jsonschema<4,>=3.2.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (3.2.0)\r\n", "Requirement already satisfied: tensorflow-model-analysis<0.37.0,>=0.36.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (0.36.0)\r\n", "Requirement already satisfied: jinja2<3,>=2.10 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from model-card-toolkit) (2.11.3)\r\n", "Requirement already satisfied: six in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from absl-py<1.1,>=0.9->model-card-toolkit) (1.16.0)\r\n", "Requirement already satisfied: MarkupSafe>=0.23 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from jinja2<3,>=2.10->model-card-toolkit) (2.0.1)\r\n", "Requirement already satisfied: importlib-metadata in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from jsonschema<4,>=3.2.0->model-card-toolkit) (4.11.3)\r\n", "Requirement already 
satisfied: pyrsistent>=0.14.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from jsonschema<4,>=3.2.0->model-card-toolkit) (0.18.1)\r\n", "Requirement already satisfied: attrs>=17.4.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from jsonschema<4,>=3.2.0->model-card-toolkit) (20.3.0)\r\n", "Requirement already satisfied: setuptools in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from jsonschema<4,>=3.2.0->model-card-toolkit) (62.1.0)\r\n", "Requirement already satisfied: packaging>=20.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from matplotlib<4,>=3.2.0->model-card-toolkit) (20.9)\r\n", "Requirement already satisfied: cycler>=0.10 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from matplotlib<4,>=3.2.0->model-card-toolkit) (0.11.0)\r\n", "Requirement already satisfied: kiwisolver>=1.0.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from matplotlib<4,>=3.2.0->model-card-toolkit) (1.4.2)\r\n", "Requirement already satisfied: numpy>=1.17 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from matplotlib<4,>=3.2.0->model-card-toolkit) (1.19.5)\r\n", "Requirement already satisfied: pyparsing>=2.2.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from matplotlib<4,>=3.2.0->model-card-toolkit) (2.4.7)\r\n", "Requirement already satisfied: python-dateutil>=2.7 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from matplotlib<4,>=3.2.0->model-card-toolkit) (2.8.2)\r\n", "Requirement already satisfied: pillow>=6.2.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from matplotlib<4,>=3.2.0->model-card-toolkit) (9.1.0)\r\n", "Requirement already satisfied: fonttools>=4.22.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from matplotlib<4,>=3.2.0->model-card-toolkit) (4.33.3)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: grpcio<2,>=1.8.6 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ml-metadata<1.6.0,>=1.5.0->model-card-toolkit) (1.46.0rc2)\r\n", "Requirement already satisfied: protobuf<4,>=3.13 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ml-metadata<1.6.0,>=1.5.0->model-card-toolkit) (3.20.1)\r\n", "Requirement already satisfied: googleapis-common-protos<2,>=1.52.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-metadata<1.6.0,>=1.5.0->model-card-toolkit) (1.56.0)\r\n", "Requirement already satisfied: tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.7.1)\r\n", "Requirement already satisfied: ipywidgets<8,>=7 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (7.7.0)\r\n", "Requirement already satisfied: pyarrow<6,>=1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (5.0.0)\r\n", "Requirement already satisfied: tfx-bsl<1.6.0,>=1.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: scipy<2,>=1.4.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.7.3)\r\n", "Requirement already satisfied: apache-beam[gcp]<3,>=2.34 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from 
tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.38.0)\r\n", "Requirement already satisfied: ipython<8,>=7 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (7.32.0)\r\n", "Requirement already satisfied: pandas<2,>=1.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.3.5)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: portpicker<2,>=1.3.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: keras-tuner<2,>=1.0.4 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.1.2)\r\n", "Requirement already satisfied: google-apitools<1,>=0.5 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (0.5.31)\r\n", "Requirement already satisfied: pyyaml<6,>=3.12 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (5.4.1)\r\n", "Requirement already satisfied: tensorflow-transform<1.6.0,>=1.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: google-cloud-aiplatform<2,>=1.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.12.1)\r\n", "Requirement already satisfied: click<8,>=7 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (7.1.2)\r\n", "Requirement already satisfied: tensorflow-serving-api!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (2.7.0)\r\n", "Requirement already satisfied: ml-pipelines-sdk==1.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: google-cloud-bigquery<3,>=2.26.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (2.34.3)\r\n", "Requirement already satisfied: docker<5,>=4.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (4.4.4)\r\n", "Requirement already satisfied: tensorflow-hub<0.13,>=0.9.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (0.12.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: kubernetes<13,>=10.0.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (12.0.1)\r\n", "Requirement already satisfied: tensorflow-data-validation<1.6.0,>=1.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: google-api-python-client<2,>=1.8 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.12.11)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: httplib2<0.20.0,>=0.8 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.19.1)\r\n", "Requirement already satisfied: pymongo<4.0.0,>=3.8.0 in 
/tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (3.12.3)\r\n", "Requirement already satisfied: dill<0.3.2,>=0.3.1.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.3.1.1)\r\n", "Requirement already satisfied: typing-extensions>=3.7.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (4.2.0)\r\n", "Requirement already satisfied: crcmod<2.0,>=1.7 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.7)\r\n", "Requirement already satisfied: fastavro<2,>=0.23.6 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.4.11)\r\n", "Requirement already satisfied: pydot<2,>=1.2.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.4.2)\r\n", "Requirement already satisfied: oauth2client<5,>=2.0.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (4.1.3)\r\n", "Requirement already satisfied: hdfs<3.0.0,>=2.1.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.7.0)\r\n", "Requirement already satisfied: cloudpickle<3,>=2.0.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.0.0)\r\n", "Requirement already satisfied: pytz>=2018.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2022.1)\r\n", "Requirement already satisfied: proto-plus<2,>=1.7.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.20.3)\r\n", "Requirement already satisfied: requests<3.0.0,>=2.24.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.27.1)\r\n", "Requirement already satisfied: orjson<4.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (3.6.8)\r\n", "Requirement already satisfied: google-auth<3,>=1.18.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.35.0)\r\n", "Requirement already satisfied: google-cloud-datastore<2,>=1.8.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.15.4)\r\n", "Requirement already satisfied: google-cloud-pubsub<3,>=2.1.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.12.0)\r\n", "Requirement already satisfied: google-cloud-language<2,>=1.3.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from 
apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.3.1)\r\n", "Requirement already satisfied: grpcio-gcp<1,>=0.2.2 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.2.2)\r\n", "Requirement already satisfied: google-cloud-spanner<2,>=1.13.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.19.2)\r\n", "Requirement already satisfied: google-cloud-vision<2,>=0.38.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.0.1)\r\n", "Requirement already satisfied: google-cloud-videointelligence<2,>=1.8.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.16.2)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: google-cloud-bigtable<2,>=0.31.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.7.1)\r\n", "Requirement already satisfied: cachetools<5,>=3.1.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (4.2.4)\r\n", "Requirement already satisfied: google-cloud-core<2,>=0.28.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.7.2)\r\n", "Requirement already satisfied: google-cloud-pubsublite<2,>=1.2.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.4.2)\r\n", "Requirement already satisfied: google-cloud-bigquery-storage>=2.6.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.13.1)\r\n", "Requirement already satisfied: google-cloud-dlp<4,>=3.0.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (3.6.2)\r\n", "Requirement already satisfied: google-cloud-recommendations-ai<=0.2.0,>=0.1.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.2.0)\r\n", "Requirement already satisfied: websocket-client>=0.32.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from docker<5,>=4.1->tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.3.2)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-api-python-client<2,>=1.8->tfx<1.6.0,>=1.5.0->model-card-toolkit) (3.0.1)\r\n", "Requirement already satisfied: google-auth-httplib2>=0.0.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-api-python-client<2,>=1.8->tfx<1.6.0,>=1.5.0->model-card-toolkit) (0.1.0)\r\n", "Requirement already satisfied: google-api-core<3dev,>=1.21.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-api-python-client<2,>=1.8->tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.31.5)\r\n" ] }, { "name": 
"stdout", "output_type": "stream", "text": [ "Requirement already satisfied: fasteners>=0.14 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-apitools<1,>=0.5->tfx<1.6.0,>=1.5.0->model-card-toolkit) (0.17.3)\r\n", "Requirement already satisfied: google-cloud-resource-manager<3.0.0dev,>=1.3.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-cloud-aiplatform<2,>=1.5.0->tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.4.1)\r\n", "Requirement already satisfied: google-cloud-storage<3.0.0dev,>=1.32.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-cloud-aiplatform<2,>=1.5.0->tfx<1.6.0,>=1.5.0->model-card-toolkit) (2.2.1)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-cloud-bigquery<3,>=2.26.0->tfx<1.6.0,>=1.5.0->model-card-toolkit) (2.3.2)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: pygments in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.12.0)\r\n", "Requirement already satisfied: decorator in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (5.1.1)\r\n", "Requirement already satisfied: traitlets>=4.2 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (5.1.1)\r\n", "Requirement already satisfied: pickleshare in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.7.5)\r\n", "Requirement already satisfied: backcall in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.2.0)\r\n", "Requirement already satisfied: pexpect>4.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (4.8.0)\r\n", "Requirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (3.0.29)\r\n", "Requirement already satisfied: matplotlib-inline in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.1.3)\r\n", "Requirement already satisfied: jedi>=0.16 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.18.1)\r\n", "Requirement already satisfied: widgetsnbextension~=3.6.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (3.6.0)\r\n", "Requirement already satisfied: jupyterlab-widgets>=1.0.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.1.0)\r\n", "Requirement already satisfied: nbformat>=4.2.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (5.3.0)\r\n", "Requirement already satisfied: ipython-genutils~=0.2.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages 
(from ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.2.0)\r\n", "Requirement already satisfied: ipykernel>=4.5.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (6.13.0)\r\n", "Requirement already satisfied: tensorboard in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from keras-tuner<2,>=1.0.4->tfx<1.6.0,>=1.5.0->model-card-toolkit) (2.8.0)\r\n", "Requirement already satisfied: kt-legacy in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from keras-tuner<2,>=1.0.4->tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.0.4)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: certifi>=14.05.14 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from kubernetes<13,>=10.0.1->tfx<1.6.0,>=1.5.0->model-card-toolkit) (2021.10.8)\r\n", "Requirement already satisfied: requests-oauthlib in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from kubernetes<13,>=10.0.1->tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.3.1)\r\n", "Requirement already satisfied: urllib3>=1.24.2 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from kubernetes<13,>=10.0.1->tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.26.9)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: psutil in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from portpicker<2,>=1.3.1->tfx<1.6.0,>=1.5.0->model-card-toolkit) (5.9.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: keras<2.8,>=2.7.0rc0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.7.0)\r\n", "Requirement already satisfied: libclang>=9.0.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (14.0.1)\r\n", "Requirement already satisfied: keras-preprocessing>=1.1.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.1.2)\r\n", "Requirement already satisfied: google-pasta>=0.1.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.2.0)\r\n", "Requirement already satisfied: h5py>=2.9.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (3.6.0)\r\n", "Requirement already satisfied: flatbuffers<3.0,>=1.12 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.12)\r\n", "Requirement already satisfied: opt-einsum>=2.3.2 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (3.3.0)\r\n", "Requirement already satisfied: wrapt>=1.11.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages 
(from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.14.0)\r\n", "Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.21.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.25.0)\r\n", "Requirement already satisfied: gast<0.5.0,>=0.2.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.4.0)\r\n", "Requirement already satisfied: wheel<1.0,>=0.32.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.37.1)\r\n", "Requirement already satisfied: tensorflow-estimator<2.8,~=2.7.0rc0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.7.0)\r\n", "Requirement already satisfied: termcolor>=1.1.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.1.0)\r\n", "Requirement already satisfied: astunparse>=1.6.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.6.3)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: joblib<0.15,>=0.12 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorflow-data-validation<1.6.0,>=1.5.0->tfx<1.6.0,>=1.5.0->model-card-toolkit) (0.14.1)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: zipp>=0.5 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from importlib-metadata->jsonschema<4,>=3.2.0->model-card-toolkit) (3.8.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: pyasn1-modules>=0.2.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-auth<3,>=1.18.0->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.2.8)\r\n", "Requirement already satisfied: rsa<5,>=3.1.4 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-auth<3,>=1.18.0->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (4.8)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: grpc-google-iam-v1<0.13dev,>=0.12.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-cloud-bigtable<2,>=0.31.1->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.12.4)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: grpcio-status>=1.16.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-cloud-pubsub<3,>=2.1.0->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.44.0)\r\n", "Requirement already satisfied: overrides<7.0.0,>=6.0.1 in 
/tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-cloud-pubsublite<2,>=1.2.0->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (6.1.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery<3,>=2.26.0->tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.3.0)\r\n", "Requirement already satisfied: cached-property in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from h5py>=2.9.0->tensorflow!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,<3,>=1.15.2->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.5.2)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: docopt in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from hdfs<3.0.0,>=2.1.0->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.6.2)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: nest-asyncio in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.5.5)\r\n", "Requirement already satisfied: jupyter-client>=6.1.12 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (7.3.0)\r\n", "Requirement already satisfied: debugpy>=1.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.6.0)\r\n", "Requirement already satisfied: tornado>=6.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (6.1)\r\n", "Requirement already satisfied: parso<0.9.0,>=0.8.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from jedi>=0.16->ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.8.3)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: fastjsonschema in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbformat>=4.2.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.15.3)\r\n", "Requirement already satisfied: jupyter-core in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbformat>=4.2.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (4.10.0)\r\n", "Requirement already satisfied: pyasn1>=0.1.7 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from oauth2client<5,>=2.0.1->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.4.8)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: ptyprocess>=0.5 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from pexpect>4.3->ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.7.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: wcwidth in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.2.5)\r\n" ] }, { "name": "stdout", "output_type": 
"stream", "text": [ "Requirement already satisfied: idna<4,>=2.5 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from requests<3.0.0,>=2.24.0->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (3.3)\r\n", "Requirement already satisfied: charset-normalizer~=2.0.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from requests<3.0.0,>=2.24.0->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.0.12)\r\n", "Requirement already satisfied: markdown>=2.6.8 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx<1.6.0,>=1.5.0->model-card-toolkit) (3.3.6)\r\n", "Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx<1.6.0,>=1.5.0->model-card-toolkit) (0.4.6)\r\n", "Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx<1.6.0,>=1.5.0->model-card-toolkit) (1.8.1)\r\n", "Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx<1.6.0,>=1.5.0->model-card-toolkit) (0.6.1)\r\n", "Requirement already satisfied: werkzeug>=0.11.15 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from tensorboard->keras-tuner<2,>=1.0.4->tfx<1.6.0,>=1.5.0->model-card-toolkit) (2.1.1)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: notebook>=4.4.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (6.4.11)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: oauthlib>=3.0.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from requests-oauthlib->kubernetes<13,>=10.0.1->tfx<1.6.0,>=1.5.0->model-card-toolkit) (3.2.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: entrypoints in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from jupyter-client>=6.1.12->ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.4)\r\n", "Requirement already satisfied: pyzmq>=22.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from jupyter-client>=6.1.12->ipykernel>=4.5.1->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (22.3.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: prometheus-client in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.14.1)\r\n", "Requirement already satisfied: nbconvert>=5 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (6.4.5)\r\n", "Requirement already satisfied: Send2Trash>=1.8.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.8.0)\r\n", "Requirement already satisfied: terminado>=0.8.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from 
notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.13.3)\r\n", "Requirement already satisfied: argon2-cffi in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (21.3.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: typing-utils>=0.0.3 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from overrides<7.0.0,>=6.0.1->google-cloud-pubsublite<2,>=1.2.0->apache-beam[gcp]<3,>=2.34->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.1.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: defusedxml in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.7.1)\r\n", "Requirement already satisfied: nbclient<0.6.0,>=0.5.0 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.5.13)\r\n", "Requirement already satisfied: pandocfilters>=1.4.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.5.0)\r\n", "Requirement already satisfied: bleach in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (5.0.0)\r\n", "Requirement already satisfied: mistune<2,>=0.8.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.8.4)\r\n", "Requirement already satisfied: jupyterlab-pygments in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.2.2)\r\n", "Requirement already satisfied: testpath in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.6.0)\r\n", "Requirement already satisfied: beautifulsoup4 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (4.11.1)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: argon2-cffi-bindings in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from argon2-cffi->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (21.2.0)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: cffi>=1.0.1 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from argon2-cffi-bindings->argon2-cffi->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (1.15.0)\r\n", "Requirement already 
satisfied: soupsieve>1.2 in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from beautifulsoup4->nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.3.2.post1)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: webencodings in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from bleach->nbconvert>=5->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (0.5.1)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Requirement already satisfied: pycparser in /tmpfs/src/tf_docs_env/lib/python3.7/site-packages (from cffi>=1.0.1->argon2-cffi-bindings->argon2-cffi->notebook>=4.4.1->widgetsnbextension~=3.6.0->ipywidgets<8,>=7->tensorflow-model-analysis<0.37.0,>=0.36.0->model-card-toolkit) (2.21)\r\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\u001b[33mWARNING: You are using pip version 21.3; however, version 22.0.4 is available.\r\n", "You should consider upgrading via the '/tmpfs/src/tf_docs_env/bin/python -m pip install --upgrade pip' command.\u001b[0m\r\n" ] } ], "source": [ "!pip install --upgrade pip==21.3\n", "!pip install model-card-toolkit" ] }, { "cell_type": "markdown", "metadata": { "id": "EwT0nov5QO1M" }, "source": [ "####*Did you restart the runtime?*\n", "\n", "If you are using Google Colab, the runtime must be restarted after installing new packages." ] }, { "cell_type": "markdown", "metadata": { "id": "N-ePgV0Lj68Q" }, "source": [ "### Import packages\n", "\n", "We import necessary packages, including standard TFX component classes and check the library versions.\n" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:36.815205Z", "iopub.status.busy": "2022-04-27T09:10:36.814686Z", "iopub.status.idle": "2022-04-27T09:10:40.101377Z", "shell.execute_reply": "2022-04-27T09:10:40.100772Z" }, "id": "YIqpWK9efviJ" }, "outputs": [], "source": [ "import os\n", "import pprint\n", "import tempfile\n", "import urllib\n", "\n", "import absl\n", "import random\n", "import tensorflow.compat.v2 as tf\n", "import tensorflow_model_analysis as tfma\n", "tf.get_logger().propagate = False\n", "pp = pprint.PrettyPrinter()\n", "\n", "import tfx\n", "from tfx.components import CsvExampleGen\n", "from tfx.components import Evaluator\n", "from tfx.components import Pusher\n", "from tfx.components import SchemaGen\n", "from tfx.components import StatisticsGen\n", "from tfx.components import Trainer\n", "from tfx.components import Transform\n", "from tfx.components.trainer.executor import GenericExecutor\n", "from tfx.dsl.components.base import executor_spec\n", "from tfx.dsl.experimental import latest_blessed_model_resolver\n", "from tfx.orchestration import metadata\n", "from tfx.orchestration import pipeline\n", "from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext\n", "from tfx.proto import pusher_pb2\n", "from tfx.proto import trainer_pb2\n", "from tfx.types import Channel\n", "from tfx.types.standard_artifacts import Model\n", "from tfx.types.standard_artifacts import ModelBlessing\n", "\n", "import ml_metadata as mlmd" ] }, { "cell_type": "markdown", "metadata": { "id": "dDyZyTT02ysB" }, "source": [ "Ensure TensorFlow 2 is running and executing eagerly." 
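, "\n", "\n", "If eager execution is still disabled after the next cell runs (for example, because the runtime was not restarted after installing the packages above), a guard like the following minimal sketch fails fast. The assertion and its message are our own addition, not part of TFX:\n", "\n", "```python\n", "import tensorflow.compat.v2 as tf\n", "\n", "tf.enable_v2_behavior()  # Switch on TF2 semantics under the compat import.\n", "assert tf.executing_eagerly(), 'Eager execution is off; restart the runtime and re-run setup.'\n", "```"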
] }, { "cell_type": "code", "execution_count": 4, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:40.105209Z", "iopub.status.busy": "2022-04-27T09:10:40.104739Z", "iopub.status.idle": "2022-04-27T09:10:40.113374Z", "shell.execute_reply": "2022-04-27T09:10:40.112886Z" }, "id": "XuUdIuFIwvZR" }, "outputs": [ { "data": { "text/plain": [ "True" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tf.enable_v2_behavior()\n", "tf.executing_eagerly()" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:40.116168Z", "iopub.status.busy": "2022-04-27T09:10:40.115716Z", "iopub.status.idle": "2022-04-27T09:10:40.119402Z", "shell.execute_reply": "2022-04-27T09:10:40.118828Z" }, "id": "eZ4K18_DN2D8" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "TensorFlow version: 2.7.1\n", "TFX version: 1.5.0\n", "MLMD version: 1.5.0\n" ] } ], "source": [ "print('TensorFlow version: {}'.format(tf.__version__))\n", "print('TFX version: {}'.format(tfx.version.__version__))\n", "print('MLMD version: {}'.format(mlmd.__version__))" ] }, { "cell_type": "markdown", "metadata": { "id": "ufJKQ6OvkJlY" }, "source": [ "### Set up pipeline paths" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:40.122229Z", "iopub.status.busy": "2022-04-27T09:10:40.121780Z", "iopub.status.idle": "2022-04-27T09:10:40.124896Z", "shell.execute_reply": "2022-04-27T09:10:40.124416Z" }, "id": "ad5JLpKbf6sN" }, "outputs": [], "source": [ "# This is the root directory for your TFX pip package installation.\n", "_tfx_root = tfx.__path__\n", "\n", "# Set up logging.\n", "absl.logging.set_verbosity(absl.logging.INFO)" ] }, { "cell_type": "markdown", "metadata": { "id": "n2cMMAbSkGfX" }, "source": [ "### Download example data\n", "We download the example dataset for use in our TFX pipeline.\n" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:40.127667Z", "iopub.status.busy": "2022-04-27T09:10:40.127229Z", "iopub.status.idle": "2022-04-27T09:10:40.933244Z", "shell.execute_reply": "2022-04-27T09:10:40.932590Z" }, "id": "BywX6OUEhAqn" }, "outputs": [], "source": [ "DATA_PATH = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/' \\\n", " 'adult.data'\n", "_data_root = tempfile.mkdtemp(prefix='tfx-data')\n", "_data_filepath = os.path.join(_data_root, \"data.csv\")\n", "urllib.request.urlretrieve(DATA_PATH, _data_filepath)\n", "\n", "columns = [\n", " \"Age\", \"Workclass\", \"fnlwgt\", \"Education\", \"Education-Num\", \"Marital-Status\",\n", " \"Occupation\", \"Relationship\", \"Race\", \"Sex\", \"Capital-Gain\", \"Capital-Loss\",\n", " \"Hours-per-week\", \"Country\", \"Over-50K\"]\n", "\n", "with open(_data_filepath, 'r') as f:\n", " content = f.read()\n", " content = content.replace(\", <=50K\", ', 0').replace(\", >50K\", ', 1')\n", "\n", "with open(_data_filepath, 'w') as f:\n", " f.write(','.join(columns) + '\\n' + content)" ] }, { "cell_type": "markdown", "metadata": { "id": "blZC1sIQOWfH" }, "source": [ "Take a quick look at the CSV file." 
] }, { "cell_type": "code", "execution_count": 8, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:40.937575Z", "iopub.status.busy": "2022-04-27T09:10:40.937043Z", "iopub.status.idle": "2022-04-27T09:10:41.065536Z", "shell.execute_reply": "2022-04-27T09:10:41.064452Z" }, "id": "c5YPeLPFOXaD" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Age,Workclass,fnlwgt,Education,Education-Num,Marital-Status,Occupation,Relationship,Race,Sex,Capital-Gain,Capital-Loss,Hours-per-week,Country,Over-50K\r\n", "39, State-gov, 77516, Bachelors, 13, Never-married, Adm-clerical, Not-in-family, White, Male, 2174, 0, 40, United-States, 0\r\n", "50, Self-emp-not-inc, 83311, Bachelors, 13, Married-civ-spouse, Exec-managerial, Husband, White, Male, 0, 0, 13, United-States, 0\r\n", "38, Private, 215646, HS-grad, 9, Divorced, Handlers-cleaners, Not-in-family, White, Male, 0, 0, 40, United-States, 0\r\n", "53, Private, 234721, 11th, 7, Married-civ-spouse, Handlers-cleaners, Husband, Black, Male, 0, 0, 40, United-States, 0\r\n", "28, Private, 338409, Bachelors, 13, Married-civ-spouse, Prof-specialty, Wife, Black, Female, 0, 0, 40, Cuba, 0\r\n", "37, Private, 284582, Masters, 14, Married-civ-spouse, Exec-managerial, Wife, White, Female, 0, 0, 40, United-States, 0\r\n", "49, Private, 160187, 9th, 5, Married-spouse-absent, Other-service, Not-in-family, Black, Female, 0, 0, 16, Jamaica, 0\r\n", "52, Self-emp-not-inc, 209642, HS-grad, 9, Married-civ-spouse, Exec-managerial, Husband, White, Male, 0, 0, 45, United-States, 1\r\n", "31, Private, 45781, Masters, 14, Never-married, Prof-specialty, Not-in-family, White, Female, 14084, 0, 50, United-States, 1\r\n" ] } ], "source": [ "!head {_data_filepath}" ] }, { "cell_type": "markdown", "metadata": { "id": "8ONIE_hdkPS4" }, "source": [ "### Create the InteractiveContext\n", "Last, we create an InteractiveContext, which will allow us to run TFX components interactively in this notebook." ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:41.069577Z", "iopub.status.busy": "2022-04-27T09:10:41.068953Z", "iopub.status.idle": "2022-04-27T09:10:41.075496Z", "shell.execute_reply": "2022-04-27T09:10:41.074893Z" }, "id": "0Rh6K5sUf9dd" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:InteractiveContext pipeline_root argument not provided: using temporary directory /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld as root for pipeline outputs.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:InteractiveContext metadata_connection_config not provided: using SQLite ML Metadata database at /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/metadata.sqlite.\n" ] } ], "source": [ "# Here, we create an InteractiveContext using default parameters. This will\n", "# use a temporary directory with an ephemeral ML Metadata database instance.\n", "# To use your own pipeline root or database, the optional properties\n", "# `pipeline_root` and `metadata_connection_config` may be passed to\n", "# InteractiveContext. 
Calls to InteractiveContext are no-ops outside of the\n", "# notebook.\n", "context = InteractiveContext(pipeline_name=\"Census Income Classification Pipeline\")" ] }, { "cell_type": "markdown", "metadata": { "id": "HdQWxfsVkzdJ" }, "source": [ "## Run TFX components interactively\n", "In the cells that follow, we create TFX components one-by-one, run each of them, and visualize their output artifacts. In this notebook, we won’t provide detailed explanations of each TFX component, but you can see what each does at [TFX Colab workshop](https://github.com/tensorflow/workshops/blob/master/tfx_labs/Lab_1_Pipeline_in_Colab.ipynb)." ] }, { "cell_type": "markdown", "metadata": { "id": "L9fwt9gQk3BR" }, "source": [ "### ExampleGen\n", "\n", "Create the `ExampleGen` component to split data into training and evaluation sets, convert the data into `tf.Example` format, and copy data into the `_tfx_root` directory for other components to access. " ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:41.078766Z", "iopub.status.busy": "2022-04-27T09:10:41.078255Z", "iopub.status.idle": "2022-04-27T09:10:52.589180Z", "shell.execute_reply": "2022-04-27T09:10:52.588596Z" }, "id": "PyXjuMt8f-9u" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running driver for CsvExampleGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:select span and version = (0, None)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:latest span and version = (0, None)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running executor for CsvExampleGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Generating examples.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:apache_beam.runners.interactive.interactive_environment:Dependencies required for Interactive Beam PCollection visualization are not available, please use: `pip install apache-beam[interactive]` to install necessary dependencies to enable all data visualization features.\n" ] }, { "data": { "application/javascript": [ "\n", " if (typeof window.interactive_beam_jquery == 'undefined') {\n", " var jqueryScript = document.createElement('script');\n", " jqueryScript.src = 'https://code.jquery.com/jquery-3.4.1.slim.min.js';\n", " jqueryScript.type = 'text/javascript';\n", " jqueryScript.onload = function() {\n", " var datatableScript = document.createElement('script');\n", " datatableScript.src = 'https://cdn.datatables.net/1.10.20/js/jquery.dataTables.min.js';\n", " datatableScript.type = 'text/javascript';\n", " datatableScript.onload = function() {\n", " window.interactive_beam_jquery = jQuery.noConflict(true);\n", " window.interactive_beam_jquery(document).ready(function($){\n", " \n", " });\n", " }\n", " document.head.appendChild(datatableScript);\n", " };\n", " document.head.appendChild(jqueryScript);\n", " } else {\n", " window.interactive_beam_jquery(document).ready(function($){\n", " \n", " });\n", " }" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Processing input csv data /tmpfs/tmp/tfx-datawz3771q2/* to TFExample.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:root:Make sure that locally built Python SDK docker image has Python 3.7 
interpreter.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:apache_beam.io.tfrecordio:Couldn't find python-snappy so the implementation of _TFRecordUtil._masked_crc32c is not as fast as it could be.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Examples generated.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running publisher for CsvExampleGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "data": { "text/html": [ "\n", "\n", "
ExecutionResult at 0x7f4824630350
.execution_id1
.component\n", "\n", "
CsvExampleGen at 0x7f4824620710
.inputs{}
.outputs
['examples']\n", "\n", "
Channel of type 'Examples' (1 artifact) at 0x7f4824620b50
.type_nameExamples
._artifacts
[0]\n", "\n", "
Artifact of type 'Examples' (uri: /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/CsvExampleGen/examples/1) at 0x7f4a04201d90
.type<class 'tfx.types.standard_artifacts.Examples'>
.uri/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/CsvExampleGen/examples/1
.span0
.split_names["train", "eval"]
.version0
.exec_properties
['input_base']/tmpfs/tmp/tfx-datawz3771q2
['input_config']{\n", " "splits": [\n", " {\n", " "name": "single_split",\n", " "pattern": "*"\n", " }\n", " ]\n", "}
['output_config']{\n", " "split_config": {\n", " "splits": [\n", " {\n", " "hash_buckets": 2,\n", " "name": "train"\n", " },\n", " {\n", " "hash_buckets": 1,\n", " "name": "eval"\n", " }\n", " ]\n", " }\n", "}
['output_data_format']6
['output_file_format']5
['custom_config']None
['range_config']None
['span']0
['version']None
['input_fingerprint']split:single_split,num_files:1,total_bytes:3852053,xor_checksum:1651050640,sum_checksum:1651050640
.component.inputs{}
.component.outputs
['examples']\n", "\n", "
Channel of type 'Examples' (1 artifact) at 0x7f4824620b50
.type_nameExamples
._artifacts
[0]\n", "\n", "
Artifact of type 'Examples' (uri: /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/CsvExampleGen/examples/1) at 0x7f4a04201d90
.type<class 'tfx.types.standard_artifacts.Examples'>
.uri/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/CsvExampleGen/examples/1
.span0
.split_names["train", "eval"]
.version0
" ], "text/plain": [ "ExecutionResult(\n", " component_id: CsvExampleGen\n", " execution_id: 1\n", " outputs:\n", " examples: Channel(\n", " type_name: Examples\n", " artifacts: [Artifact(artifact: id: 1\n", " type_id: 14\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/CsvExampleGen/examples/1\"\n", " properties {\n", " key: \"split_names\"\n", " value {\n", " string_value: \"[\\\"train\\\", \\\"eval\\\"]\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"file_format\"\n", " value {\n", " string_value: \"tfrecords_gzip\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"input_fingerprint\"\n", " value {\n", " string_value: \"split:single_split,num_files:1,total_bytes:3852053,xor_checksum:1651050640,sum_checksum:1651050640\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"payload_format\"\n", " value {\n", " string_value: \"FORMAT_TF_EXAMPLE\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"span\"\n", " value {\n", " int_value: 0\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 14\n", " name: \"Examples\"\n", " properties {\n", " key: \"span\"\n", " value: INT\n", " }\n", " properties {\n", " key: \"split_names\"\n", " value: STRING\n", " }\n", " properties {\n", " key: \"version\"\n", " value: INT\n", " }\n", " base_type: DATASET\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ))" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "example_gen = CsvExampleGen(input_base=_data_root)\n", "context.run(example_gen)" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:52.592427Z", "iopub.status.busy": "2022-04-27T09:10:52.592015Z", "iopub.status.idle": "2022-04-27T09:10:52.595674Z", "shell.execute_reply": "2022-04-27T09:10:52.595202Z" }, "id": "880KkTAkPeUg" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[\"train\", \"eval\"] /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/CsvExampleGen/examples/1\n" ] } ], "source": [ "artifact = example_gen.outputs['examples'].get()[0]\n", "print(artifact.split_names, artifact.uri)" ] }, { "cell_type": "markdown", "metadata": { "id": "J6vcbW_wPqvl" }, "source": [ "Let’s take a look at the first three training examples:" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:52.598697Z", "iopub.status.busy": "2022-04-27T09:10:52.598232Z", "iopub.status.idle": "2022-04-27T09:10:55.585737Z", "shell.execute_reply": "2022-04-27T09:10:55.585101Z" }, "id": "H4XIXjiCPwzQ" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "features {\n", " feature {\n", " key: \"Age\"\n", " value {\n", " int64_list {\n", " value: 39\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Capital-Gain\"\n", " value {\n", " int64_list {\n", " value: 2174\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Capital-Loss\"\n", " value {\n", " int64_list {\n", " value: 0\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Country\"\n", " value {\n", " bytes_list {\n", " value: \" United-States\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Education\"\n", " value {\n", " bytes_list {\n", " value: \" Bachelors\"\n", " }\n", " }\n", 
" }\n", " feature {\n", " key: \"Education-Num\"\n", " value {\n", " int64_list {\n", " value: 13\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Hours-per-week\"\n", " value {\n", " int64_list {\n", " value: 40\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Marital-Status\"\n", " value {\n", " bytes_list {\n", " value: \" Never-married\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Occupation\"\n", " value {\n", " bytes_list {\n", " value: \" Adm-clerical\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Over-50K\"\n", " value {\n", " int64_list {\n", " value: 0\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Race\"\n", " value {\n", " bytes_list {\n", " value: \" White\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Relationship\"\n", " value {\n", " bytes_list {\n", " value: \" Not-in-family\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Sex\"\n", " value {\n", " bytes_list {\n", " value: \" Male\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Workclass\"\n", " value {\n", " bytes_list {\n", " value: \" State-gov\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"fnlwgt\"\n", " value {\n", " int64_list {\n", " value: 77516\n", " }\n", " }\n", " }\n", "}\n", "\n", "features {\n", " feature {\n", " key: \"Age\"\n", " value {\n", " int64_list {\n", " value: 50\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Capital-Gain\"\n", " value {\n", " int64_list {\n", " value: 0\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Capital-Loss\"\n", " value {\n", " int64_list {\n", " value: 0\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Country\"\n", " value {\n", " bytes_list {\n", " value: \" United-States\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Education\"\n", " value {\n", " bytes_list {\n", " value: \" Bachelors\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Education-Num\"\n", " value {\n", " int64_list {\n", " value: 13\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Hours-per-week\"\n", " value {\n", " int64_list {\n", " value: 13\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Marital-Status\"\n", " value {\n", " bytes_list {\n", " value: \" Married-civ-spouse\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Occupation\"\n", " value {\n", " bytes_list {\n", " value: \" Exec-managerial\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Over-50K\"\n", " value {\n", " int64_list {\n", " value: 0\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Race\"\n", " value {\n", " bytes_list {\n", " value: \" White\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Relationship\"\n", " value {\n", " bytes_list {\n", " value: \" Husband\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Sex\"\n", " value {\n", " bytes_list {\n", " value: \" Male\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Workclass\"\n", " value {\n", " bytes_list {\n", " value: \" Self-emp-not-inc\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"fnlwgt\"\n", " value {\n", " int64_list {\n", " value: 83311\n", " }\n", " }\n", " }\n", "}\n", "\n", "features {\n", " feature {\n", " key: \"Age\"\n", " value {\n", " int64_list {\n", " value: 38\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Capital-Gain\"\n", " value {\n", " int64_list {\n", " value: 0\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Capital-Loss\"\n", " value {\n", " int64_list {\n", " value: 0\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Country\"\n", " value {\n", " bytes_list {\n", " value: \" United-States\"\n", " 
}\n", " }\n", " }\n", " feature {\n", " key: \"Education\"\n", " value {\n", " bytes_list {\n", " value: \" HS-grad\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Education-Num\"\n", " value {\n", " int64_list {\n", " value: 9\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Hours-per-week\"\n", " value {\n", " int64_list {\n", " value: 40\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Marital-Status\"\n", " value {\n", " bytes_list {\n", " value: \" Divorced\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Occupation\"\n", " value {\n", " bytes_list {\n", " value: \" Handlers-cleaners\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Over-50K\"\n", " value {\n", " int64_list {\n", " value: 0\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Race\"\n", " value {\n", " bytes_list {\n", " value: \" White\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Relationship\"\n", " value {\n", " bytes_list {\n", " value: \" Not-in-family\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Sex\"\n", " value {\n", " bytes_list {\n", " value: \" Male\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"Workclass\"\n", " value {\n", " bytes_list {\n", " value: \" Private\"\n", " }\n", " }\n", " }\n", " feature {\n", " key: \"fnlwgt\"\n", " value {\n", " int64_list {\n", " value: 215646\n", " }\n", " }\n", " }\n", "}\n", "\n" ] } ], "source": [ "# Get the URI of the output artifact representing the training examples, which is a directory\n", "train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train')\n", "\n", "# Get the list of files in this directory (all compressed TFRecord files)\n", "tfrecord_filenames = [os.path.join(train_uri, name)\n", " for name in os.listdir(train_uri)]\n", "\n", "# Create a `TFRecordDataset` to read these files\n", "dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type=\"GZIP\")\n", "\n", "# Iterate over the first 3 records and decode them.\n", "for tfrecord in dataset.take(3):\n", " serialized_example = tfrecord.numpy()\n", " example = tf.train.Example()\n", " example.ParseFromString(serialized_example)\n", " pp.pprint(example)" ] }, { "cell_type": "markdown", "metadata": { "id": "csM6BFhtk5Aa" }, "source": [ "### StatisticsGen\n", "\n", "`StatisticsGen` takes as input the dataset we just ingested using `ExampleGen` and allows you to perform some analysis of your dataset using TensorFlow Data Validation (TFDV)." 
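, "\n", "\n", "Under the hood, `StatisticsGen` uses TFDV to compute these statistics, so you can experiment with the same analysis outside the pipeline too. Below is a minimal sketch (the CSV lookup is illustrative rather than part of the pipeline: it simply grabs the census CSV we downloaded into `_data_root`):\n", "\n", "```python\n", "import os\n", "\n", "import tensorflow_data_validation as tfdv\n", "\n", "# Point TFDV at the raw CSV that CsvExampleGen ingested. Grab the first\n", "# file in the data directory rather than hard-coding a file name.\n", "csv_path = os.path.join(_data_root, os.listdir(_data_root)[0])\n", "\n", "# Compute per-feature statistics (the same kind StatisticsGen produces)\n", "# and render them in the notebook.\n", "stats = tfdv.generate_statistics_from_csv(data_location=csv_path)\n", "tfdv.visualize_statistics(stats)\n", "```"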
] }, { "cell_type": "code", "execution_count": 13, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:55.589432Z", "iopub.status.busy": "2022-04-27T09:10:55.589025Z", "iopub.status.idle": "2022-04-27T09:10:59.684328Z", "shell.execute_reply": "2022-04-27T09:10:59.683736Z" }, "id": "MAscCCYWgA-9" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Excluding no splits because exclude_splits is not set.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running driver for StatisticsGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running executor for StatisticsGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Generating statistics for split train.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Statistics for split train written to /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/StatisticsGen/statistics/2/Split-train.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Generating statistics for split eval.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Statistics for split eval written to /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/StatisticsGen/statistics/2/Split-eval.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:root:Make sure that locally built Python SDK docker image has Python 3.7 interpreter.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running publisher for StatisticsGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "data": { "text/html": [ "\n", "\n", "
" ], "text/plain": [ "ExecutionResult(\n", " component_id: StatisticsGen\n", " execution_id: 2\n", " outputs:\n", " statistics: Channel(\n", " type_name: ExampleStatistics\n", " artifacts: [Artifact(artifact: id: 2\n", " type_id: 16\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/StatisticsGen/statistics/2\"\n", " properties {\n", " key: \"split_names\"\n", " value {\n", " string_value: \"[\\\"train\\\", \\\"eval\\\"]\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"statistics\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"StatisticsGen\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 16\n", " name: \"ExampleStatistics\"\n", " properties {\n", " key: \"span\"\n", " value: INT\n", " }\n", " properties {\n", " key: \"split_names\"\n", " value: STRING\n", " }\n", " base_type: STATISTICS\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ))" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "statistics_gen = StatisticsGen(\n", " examples=example_gen.outputs['examples'])\n", "context.run(statistics_gen)" ] }, { "cell_type": "markdown", "metadata": { "id": "0BDfOjGy048O" }, "source": [ "After `StatisticsGen` finishes running, we can visualize the outputted statistics. Try playing with the different plots!" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:59.687953Z", "iopub.status.busy": "2022-04-27T09:10:59.687412Z", "iopub.status.idle": "2022-04-27T09:10:59.699275Z", "shell.execute_reply": "2022-04-27T09:10:59.698770Z" }, "id": "tLjXy7K6Tp_G" }, "outputs": [ { "data": { "text/html": [ "Artifact at /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/StatisticsGen/statistics/2

" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
'train' split:

" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "\n", " " ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
'eval' split:

" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "\n", " " ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "context.show(statistics_gen.outputs['statistics'])" ] }, { "cell_type": "markdown", "metadata": { "id": "HLKLTO9Nk60p" }, "source": [ "### SchemaGen\n", "\n", "`SchemaGen` will take as input the statistics that we generated with `StatisticsGen`, looking at the training split by default." ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:59.702520Z", "iopub.status.busy": "2022-04-27T09:10:59.702119Z", "iopub.status.idle": "2022-04-27T09:10:59.862467Z", "shell.execute_reply": "2022-04-27T09:10:59.861819Z" }, "id": "ygQvZ6hsiQ_J" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Excluding no splits because exclude_splits is not set.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running driver for SchemaGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running executor for SchemaGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Processing schema from statistics for split train.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Processing schema from statistics for split eval.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Schema written to /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/SchemaGen/schema/3/schema.pbtxt.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running publisher for SchemaGen\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "data": { "text/html": [ "\n", "\n", "
" ], "text/plain": [ "ExecutionResult(\n", " component_id: SchemaGen\n", " execution_id: 3\n", " outputs:\n", " schema: Channel(\n", " type_name: Schema\n", " artifacts: [Artifact(artifact: id: 3\n", " type_id: 18\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/SchemaGen/schema/3\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"schema\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"SchemaGen\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 18\n", " name: \"Schema\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ))" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "schema_gen = SchemaGen(\n", " statistics=statistics_gen.outputs['statistics'],\n", " infer_feature_shape=False)\n", "context.run(schema_gen)" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:59.865534Z", "iopub.status.busy": "2022-04-27T09:10:59.865000Z", "iopub.status.idle": "2022-04-27T09:10:59.886293Z", "shell.execute_reply": "2022-04-27T09:10:59.885776Z" }, "id": "Ec9vqDXpXeMb" }, "outputs": [ { "data": { "text/html": [ "Artifact at /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/SchemaGen/schema/3

" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
" ], "text/plain": [ " Type Presence Valency Domain\n", "Feature name \n", "'Age' INT required single -\n", "'Capital-Gain' INT required single -\n", "'Capital-Loss' INT required single -\n", "'Country' STRING required single 'Country'\n", "'Education' STRING required single 'Education'\n", "'Education-Num' INT required single -\n", "'Hours-per-week' INT required single -\n", "'Marital-Status' STRING required single 'Marital-Status'\n", "'Occupation' STRING required single 'Occupation'\n", "'Over-50K' INT required single -\n", "'Race' STRING required single 'Race'\n", "'Relationship' STRING required single 'Relationship'\n", "'Sex' STRING required single 'Sex'\n", "'Workclass' STRING required single 'Workclass'\n", "'fnlwgt' INT required single -" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
" ], "text/plain": [ " Values\n", "Domain \n", "'Country' ' ?', ' Cambodia', ' Canada', ' China', ' Columbia', ' Cuba', ' Dominican-Republic', ' Ecuador', ' El-Salvador', ' England', ' France', ' Germany', ' Greece', ' Guatemala', ' Haiti', ' Honduras', ' Hong', ' Hungary', ' India', ' Iran', ' Ireland', ' Italy', ' Jamaica', ' Japan', ' Laos', ' Mexico', ' Nicaragua', ' Outlying-US(Guam-USVI-etc)', ' Peru', ' Philippines', ' Poland', ' Portugal', ' Puerto-Rico', ' Scotland', ' South', ' Taiwan', ' Thailand', ' Trinadad&Tobago', ' United-States', ' Vietnam', ' Yugoslavia', ' Holand-Netherlands'\n", "'Education' ' 10th', ' 11th', ' 12th', ' 1st-4th', ' 5th-6th', ' 7th-8th', ' 9th', ' Assoc-acdm', ' Assoc-voc', ' Bachelors', ' Doctorate', ' HS-grad', ' Masters', ' Preschool', ' Prof-school', ' Some-college'\n", "'Marital-Status' ' Divorced', ' Married-AF-spouse', ' Married-civ-spouse', ' Married-spouse-absent', ' Never-married', ' Separated', ' Widowed'\n", "'Occupation' ' ?', ' Adm-clerical', ' Armed-Forces', ' Craft-repair', ' Exec-managerial', ' Farming-fishing', ' Handlers-cleaners', ' Machine-op-inspct', ' Other-service', ' Priv-house-serv', ' Prof-specialty', ' Protective-serv', ' Sales', ' Tech-support', ' Transport-moving'\n", "'Race' ' Amer-Indian-Eskimo', ' Asian-Pac-Islander', ' Black', ' Other', ' White'\n", "'Relationship' ' Husband', ' Not-in-family', ' Other-relative', ' Own-child', ' Unmarried', ' Wife'\n", "'Sex' ' Female', ' Male'\n", "'Workclass' ' ?', ' Federal-gov', ' Local-gov', ' Never-worked', ' Private', ' Self-emp-inc', ' Self-emp-not-inc', ' State-gov', ' Without-pay'" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "context.show(schema_gen.outputs['schema'])" ] }, { "cell_type": "markdown", "metadata": { "id": "kZWWdbA-m7zp" }, "source": [ "To learn more about schemas, see [the SchemaGen documentation](https://www.tensorflow.org/tfx/guide/schemagen)." 
] }, { "cell_type": "markdown", "metadata": { "id": "JPViEz5RlA36" }, "source": [ "### Transform\n", "\n", "`Transform` will take as input the data from `ExampleGen`, the schema from `SchemaGen`, as well as a module that contains user-defined Transform code.\n", "\n", "Let's see an example of user-defined Transform code below (for an introduction to the TensorFlow Transform APIs, [see the tutorial](https://www.tensorflow.org/tfx/tutorials/transform/simple)).\n", "\n" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:59.889294Z", "iopub.status.busy": "2022-04-27T09:10:59.888847Z", "iopub.status.idle": "2022-04-27T09:10:59.891590Z", "shell.execute_reply": "2022-04-27T09:10:59.891114Z" }, "id": "PuNSiUKb4YJf" }, "outputs": [], "source": [ "_census_income_constants_module_file = 'census_income_constants.py'" ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:59.894446Z", "iopub.status.busy": "2022-04-27T09:10:59.894031Z", "iopub.status.idle": "2022-04-27T09:10:59.898249Z", "shell.execute_reply": "2022-04-27T09:10:59.897741Z" }, "id": "HPjhXuIF4YJh", "jupyter": { "source_hidden": true } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing census_income_constants.py\n" ] } ], "source": [ "%%writefile {_census_income_constants_module_file}\n", "\n", "# Categorical features are assumed to each have a maximum value in the dataset.\n", "MAX_CATEGORICAL_FEATURE_VALUES = [20]\n", "\n", "CATEGORICAL_FEATURE_KEYS = [\"Education-Num\"]\n", "\n", "\n", "DENSE_FLOAT_FEATURE_KEYS = [\"Capital-Gain\", \"Hours-per-week\", \"Capital-Loss\"]\n", "\n", "# Number of buckets used by tf.transform for encoding each feature.\n", "FEATURE_BUCKET_COUNT = 10\n", "\n", "BUCKET_FEATURE_KEYS = [\"Age\"]\n", "\n", "# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform\n", "VOCAB_SIZE = 200\n", "\n", "# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.\n", "OOV_SIZE = 10\n", "\n", "VOCAB_FEATURE_KEYS = [\"Workclass\", \"Education\", \"Marital-Status\", \"Occupation\", \n", " \"Relationship\", \"Race\", \"Sex\", \"Country\"]\n", "\n", "# Keys\n", "LABEL_KEY = \"Over-50K\"\n", "\n", "def transformed_name(key):\n", " return key + '_xf'" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:59.901175Z", "iopub.status.busy": "2022-04-27T09:10:59.900757Z", "iopub.status.idle": "2022-04-27T09:10:59.903447Z", "shell.execute_reply": "2022-04-27T09:10:59.902961Z" }, "id": "4AJ9hBs94YJm" }, "outputs": [], "source": [ "_census_income_transform_module_file = 'census_income_transform.py'" ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:59.906123Z", "iopub.status.busy": "2022-04-27T09:10:59.905798Z", "iopub.status.idle": "2022-04-27T09:10:59.910414Z", "shell.execute_reply": "2022-04-27T09:10:59.909874Z" }, "id": "MYmxxx9A4YJn", "jupyter": { "source_hidden": true } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing census_income_transform.py\n" ] } ], "source": [ "%%writefile {_census_income_transform_module_file}\n", "\n", "import tensorflow as tf\n", "import tensorflow_transform as tft\n", "\n", "import census_income_constants\n", "\n", "_DENSE_FLOAT_FEATURE_KEYS = census_income_constants.DENSE_FLOAT_FEATURE_KEYS\n", "_VOCAB_FEATURE_KEYS = 
census_income_constants.VOCAB_FEATURE_KEYS\n", "_VOCAB_SIZE = census_income_constants.VOCAB_SIZE\n", "_OOV_SIZE = census_income_constants.OOV_SIZE\n", "_FEATURE_BUCKET_COUNT = census_income_constants.FEATURE_BUCKET_COUNT\n", "_BUCKET_FEATURE_KEYS = census_income_constants.BUCKET_FEATURE_KEYS\n", "_CATEGORICAL_FEATURE_KEYS = census_income_constants.CATEGORICAL_FEATURE_KEYS\n", "_LABEL_KEY = census_income_constants.LABEL_KEY\n", "_transformed_name = census_income_constants.transformed_name\n", "\n", "\n", "def preprocessing_fn(inputs):\n", " \"\"\"tf.transform's callback function for preprocessing inputs.\n", " Args:\n", " inputs: map from feature keys to raw not-yet-transformed features.\n", " Returns:\n", " Map from string feature key to transformed feature operations.\n", " \"\"\"\n", " outputs = {}\n", " for key in _DENSE_FLOAT_FEATURE_KEYS:\n", " # Preserve this feature as a dense float, setting nan's to the mean.\n", " outputs[_transformed_name(key)] = tft.scale_to_z_score(\n", " _fill_in_missing(inputs[key]))\n", "\n", " for key in _VOCAB_FEATURE_KEYS:\n", " # Build a vocabulary for this feature.\n", " outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(\n", " _fill_in_missing(inputs[key]),\n", " top_k=_VOCAB_SIZE,\n", " num_oov_buckets=_OOV_SIZE)\n", "\n", " for key in _BUCKET_FEATURE_KEYS:\n", " outputs[_transformed_name(key)] = tft.bucketize(\n", " _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)\n", "\n", " for key in _CATEGORICAL_FEATURE_KEYS:\n", " outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])\n", "\n", " label = _fill_in_missing(inputs[_LABEL_KEY])\n", " outputs[_transformed_name(_LABEL_KEY)] = label\n", " \n", " return outputs\n", "\n", "\n", "def _fill_in_missing(x):\n", " \"\"\"Replace missing values in a SparseTensor.\n", " Fills in missing values of `x` with '' or 0, and converts to a dense tensor.\n", " Args:\n", " x: A `SparseTensor` of rank 2. 
Its dense shape should have size at most 1\n", " in the second dimension.\n", " Returns:\n", " A rank 1 tensor where missing values of `x` have been filled in.\n", " \"\"\"\n", " default_value = '' if x.dtype == tf.string else 0\n", " return tf.squeeze(\n", " tf.sparse.to_dense(\n", " tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),\n", " default_value),\n", " axis=1)" ] }, { "cell_type": "code", "execution_count": 21, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:10:59.912921Z", "iopub.status.busy": "2022-04-27T09:10:59.912756Z", "iopub.status.idle": "2022-04-27T09:11:38.881900Z", "shell.execute_reply": "2022-04-27T09:11:38.881292Z" }, "id": "jHfhth_GiZI9" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Generating ephemeral wheel package for '/tmpfs/src/temp/model_card_toolkit/documentation/examples/census_income_transform.py' (including modules: ['census_income_transform', 'census_income_constants']).\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:User module package has hash fingerprint version 9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Executing: ['/tmpfs/src/tf_docs_env/bin/python', '/tmpfs/tmp/tmp6ykorptc/_tfx_generated_setup.py', 'bdist_wheel', '--bdist-dir', '/tmpfs/tmp/tmppxg6tk5f', '--dist-dir', '/tmpfs/tmp/tmpmcwpffer']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "running bdist_wheel\n", "running build\n", "running build_py\n", "creating build\n", "creating build/lib\n", "copying census_income_transform.py -> build/lib\n", "copying census_income_constants.py -> build/lib\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "installing to /tmpfs/tmp/tmppxg6tk5f\n", "running install\n", "running install_lib\n", "copying build/lib/census_income_transform.py -> /tmpfs/tmp/tmppxg6tk5f\n", "copying build/lib/census_income_constants.py -> /tmpfs/tmp/tmppxg6tk5f\n", "running install_egg_info\n", "running egg_info\n", "creating tfx_user_code_Transform.egg-info\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "writing tfx_user_code_Transform.egg-info/PKG-INFO\n", "writing dependency_links to tfx_user_code_Transform.egg-info/dependency_links.txt\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/tmpfs/src/tf_docs_env/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", " setuptools.SetuptoolsDeprecationWarning,\n", "INFO:absl:Successfully built user code wheel distribution at '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl'; target user module is 'census_income_transform'.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Full user module path is 'census_income_transform@/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl'\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running driver for Transform\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "writing top-level names to tfx_user_code_Transform.egg-info/top_level.txt\n", "writing manifest file 'tfx_user_code_Transform.egg-info/SOURCES.txt'\n", "reading manifest file 'tfx_user_code_Transform.egg-info/SOURCES.txt'\n", "writing manifest file 'tfx_user_code_Transform.egg-info/SOURCES.txt'\n", "Copying tfx_user_code_Transform.egg-info to /tmpfs/tmp/tmppxg6tk5f/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3.7.egg-info\n", "running install_scripts\n", "creating /tmpfs/tmp/tmppxg6tk5f/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0.dist-info/WHEEL\n", "creating '/tmpfs/tmp/tmpmcwpffer/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl' and adding '/tmpfs/tmp/tmppxg6tk5f' to it\n", "adding 'census_income_constants.py'\n", "adding 'census_income_transform.py'\n", "adding 'tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0.dist-info/METADATA'\n", "adding 'tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0.dist-info/WHEEL'\n", "adding 'tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0.dist-info/top_level.txt'\n", "adding 'tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0.dist-info/RECORD'\n", "removing /tmpfs/tmp/tmppxg6tk5f\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running executor for Transform\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Analyze the 'train' split and transform all splits when splits_config is not set.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:udf_utils.get_fn {'module_file': None, 'module_path': 'census_income_transform@/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl', 'preprocessing_fn': None} 'preprocessing_fn'\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Installing '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl' to a temporary directory.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Executing: ['/tmpfs/src/tf_docs_env/bin/python', '-m', 'pip', 'install', '--target', 
'/tmpfs/tmp/tmpp2fgxex6', '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Processing /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: You are using pip version 21.3; however, version 22.0.4 is available.\n", "You should consider upgrading via the '/tmpfs/src/tf_docs_env/bin/python -m pip install --upgrade pip' command.\n", "INFO:absl:Successfully installed '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl'.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:udf_utils.get_fn {'module_file': None, 'module_path': 'census_income_transform@/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl', 'stats_options_updater_fn': None} 'stats_options_updater_fn'\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Installing '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl' to a temporary directory.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Executing: ['/tmpfs/src/tf_docs_env/bin/python', '-m', 'pip', 'install', '--target', '/tmpfs/tmp/tmp08sebpcy', '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Installing collected packages: tfx-user-code-Transform\n", "Successfully installed tfx-user-code-Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Processing /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: You are using pip version 21.3; however, version 22.0.4 is available.\n", "You should consider upgrading via the '/tmpfs/src/tf_docs_env/bin/python -m pip install --upgrade pip' command.\n", "INFO:absl:Successfully installed '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl'.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Installing '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl' to a temporary directory.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Executing: ['/tmpfs/src/tf_docs_env/bin/python', '-m', 'pip', 'install', '--target', '/tmpfs/tmp/tmpyzdmu59r', '/tmpfs/tmp/tfx-Census Income Classification 
Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Installing collected packages: tfx-user-code-Transform\n", "Successfully installed tfx-user-code-Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Processing /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: You are using pip version 21.3; however, version 22.0.4 is available.\n", "You should consider upgrading via the '/tmpfs/src/tf_docs_env/bin/python -m pip install --upgrade pip' command.\n", "INFO:absl:Successfully installed '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0-py3-none-any.whl'.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature fnlwgt has no shape. 
Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Installing collected packages: tfx-user-code-Transform\n", "Successfully installed tfx-user-code-Transform-0.0+9ad4c9c61fb069aa72a9c3dbadb0b29c6470126efbcfd2f85e87e9c2355a96d0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:From /tmpfs/src/tf_docs_env/lib/python3.7/site-packages/tensorflow_transform/tf_utils.py:289: Tensor.experimental_ref (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use ref() instead.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Education has no shape. 
Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Marital-Status has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Occupation has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Over-50K has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Race has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Relationship has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Sex has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Workclass has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature fnlwgt has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is 
acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Age has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Capital-Gain has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Capital-Loss has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Country has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Education has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Education-Num has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Hours-per-week has no shape. Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Feature Marital-Status has no shape. 
Setting to VarLenSparseTensor.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:root:This output type hint will be ignored and not used for type-checking purposes. Typically, output type hints for a PTransform are single (or nested) types wrapped by a PCollection, PDone, or None. Got: Tuple[Dict[str, Union[NoneType, _Dataset]], Union[Dict[str, Dict[str, PCollection]], NoneType], int] instead.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:If the number of unique tokens is smaller than the provided top_k or approximation error is acceptable, consider using tft.experimental.approximate_vocabulary for a potentially more efficient implementation.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:Tables initialized inside a tf.function will be re-initialized on every invocation of the function. This re-initialization can have significant impact on performance. Consider lifting them out of the graph context using `tf.init_scope`.: compute_and_apply_vocabulary/apply_vocab/text_file_init/InitializeTableFromTextFileV2\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:root:Make sure that locally built Python SDK docker image has Python 3.7 interpreter.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2022-04-27 09:11:17.569552: W tensorflow/python/util/util.cc:368] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Assets written to: /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/transform_graph/4/.temp_path/tftransform_tmp/3ec9e5378ce447b5b7959c94b9b4bfda/assets\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:tensorflow_text is not available.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:tensorflow_decision_forests is not available.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:struct2tensor is not available.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Assets written to: /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/transform_graph/4/.temp_path/tftransform_tmp/3ebfabff57b2413b9071df6af8e0070b/assets\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running publisher for Transform\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "data": { 
"text/html": [ "\n", "\n", "
<em>ExecutionResult for the Transform component (execution_id: 4). The interactive HTML view of the component's inputs ('examples', 'schema'), outputs ('transform_graph', 'transformed_examples', 'updated_analyzer_cache', 'pre_transform_schema', 'pre_transform_stats', 'post_transform_schema', 'post_transform_stats', 'post_transform_anomalies'), and exec_properties could not be recovered; the text/plain summary follows.</em>
" ], "text/plain": [ "ExecutionResult(\n", " component_id: Transform\n", " execution_id: 4\n", " outputs:\n", " transform_graph: Channel(\n", " type_name: TransformGraph\n", " artifacts: [Artifact(artifact: id: 4\n", " type_id: 20\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/transform_graph/4\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"transform_graph\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 20\n", " name: \"TransformGraph\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " transformed_examples: Channel(\n", " type_name: Examples\n", " artifacts: [Artifact(artifact: id: 5\n", " type_id: 14\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/transformed_examples/4\"\n", " properties {\n", " key: \"split_names\"\n", " value {\n", " string_value: \"[\\\"train\\\", \\\"eval\\\"]\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"transformed_examples\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 14\n", " name: \"Examples\"\n", " properties {\n", " key: \"span\"\n", " value: INT\n", " }\n", " properties {\n", " key: \"split_names\"\n", " value: STRING\n", " }\n", " properties {\n", " key: \"version\"\n", " value: INT\n", " }\n", " base_type: DATASET\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " updated_analyzer_cache: Channel(\n", " type_name: TransformCache\n", " artifacts: [Artifact(artifact: id: 6\n", " type_id: 21\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/updated_analyzer_cache/4\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"updated_analyzer_cache\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 21\n", " name: \"TransformCache\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " pre_transform_schema: Channel(\n", " type_name: Schema\n", " artifacts: [Artifact(artifact: id: 7\n", " type_id: 18\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/pre_transform_schema/4\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"pre_transform_schema\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", " }\n", 
" custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 18\n", " name: \"Schema\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " pre_transform_stats: Channel(\n", " type_name: ExampleStatistics\n", " artifacts: [Artifact(artifact: id: 8\n", " type_id: 16\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/pre_transform_stats/4\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"pre_transform_stats\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 16\n", " name: \"ExampleStatistics\"\n", " properties {\n", " key: \"span\"\n", " value: INT\n", " }\n", " properties {\n", " key: \"split_names\"\n", " value: STRING\n", " }\n", " base_type: STATISTICS\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " post_transform_schema: Channel(\n", " type_name: Schema\n", " artifacts: [Artifact(artifact: id: 9\n", " type_id: 18\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/post_transform_schema/4\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"post_transform_schema\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 18\n", " name: \"Schema\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " post_transform_stats: Channel(\n", " type_name: ExampleStatistics\n", " artifacts: [Artifact(artifact: id: 10\n", " type_id: 16\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/post_transform_stats/4\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"post_transform_stats\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 16\n", " name: \"ExampleStatistics\"\n", " properties {\n", " key: \"span\"\n", " value: INT\n", " }\n", " properties {\n", " key: \"split_names\"\n", " value: STRING\n", " }\n", " base_type: STATISTICS\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " post_transform_anomalies: Channel(\n", " type_name: ExampleAnomalies\n", " artifacts: [Artifact(artifact: id: 11\n", " type_id: 22\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification 
Pipeline-9ikbb3ld/Transform/post_transform_anomalies/4\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"post_transform_anomalies\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 22\n", " name: \"ExampleAnomalies\"\n", " properties {\n", " key: \"span\"\n", " value: INT\n", " }\n", " properties {\n", " key: \"split_names\"\n", " value: STRING\n", " }\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ))" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "transform = Transform(\n", " examples=example_gen.outputs['examples'],\n", " schema=schema_gen.outputs['schema'],\n", " module_file=os.path.abspath(_census_income_transform_module_file))\n", "context.run(transform)" ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:11:38.886990Z", "iopub.status.busy": "2022-04-27T09:11:38.886679Z", "iopub.status.idle": "2022-04-27T09:11:38.891432Z", "shell.execute_reply": "2022-04-27T09:11:38.890895Z" }, "id": "SClrAaEGR1O5" }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
<em>Channel of type 'TransformGraph' (1 artifact) with uri /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/transform_graph/4. The interactive HTML view could not be recovered; the text/plain summary follows.</em>
" ], "text/plain": [ "Channel(\n", " type_name: TransformGraph\n", " artifacts: [Artifact(artifact: id: 4\n", "type_id: 20\n", "uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Transform/transform_graph/4\"\n", "custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"transform_graph\"\n", " }\n", "}\n", "custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Transform\"\n", " }\n", "}\n", "custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", "}\n", "custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", "}\n", "state: LIVE\n", ", artifact_type: id: 20\n", "name: \"TransformGraph\"\n", ")]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", ")" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "transform.outputs['transform_graph']" ] }, { "cell_type": "markdown", "metadata": { "id": "OBJFtnl6lCg9" }, "source": [ "### Trainer\n", "Let's see an example of user-defined model code below (for an introduction to the TensorFlow Keras APIs, [see the tutorial](https://www.tensorflow.org/guide/keras)):" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:11:38.894687Z", "iopub.status.busy": "2022-04-27T09:11:38.894144Z", "iopub.status.idle": "2022-04-27T09:11:38.897132Z", "shell.execute_reply": "2022-04-27T09:11:38.896592Z" }, "id": "N1376oq04YJt" }, "outputs": [], "source": [ "_census_income_trainer_module_file = 'census_income_trainer.py'" ] }, { "cell_type": "code", "execution_count": 24, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:11:38.900395Z", "iopub.status.busy": "2022-04-27T09:11:38.900211Z", "iopub.status.idle": "2022-04-27T09:11:38.907694Z", "shell.execute_reply": "2022-04-27T09:11:38.907152Z" }, "id": "nf9UuNng4YJu", "jupyter": { "source_hidden": true } }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Writing census_income_trainer.py\n" ] } ], "source": [ "%%writefile {_census_income_trainer_module_file}\n", "\n", "from typing import List, Text\n", "\n", "import os\n", "import absl\n", "import datetime\n", "import tensorflow as tf\n", "import tensorflow_transform as tft\n", "\n", "from tfx.components.trainer.executor import TrainerFnArgs\n", "\n", "import census_income_constants\n", "\n", "_DENSE_FLOAT_FEATURE_KEYS = census_income_constants.DENSE_FLOAT_FEATURE_KEYS\n", "_VOCAB_FEATURE_KEYS = census_income_constants.VOCAB_FEATURE_KEYS\n", "_VOCAB_SIZE = census_income_constants.VOCAB_SIZE\n", "_OOV_SIZE = census_income_constants.OOV_SIZE\n", "_FEATURE_BUCKET_COUNT = census_income_constants.FEATURE_BUCKET_COUNT\n", "_BUCKET_FEATURE_KEYS = census_income_constants.BUCKET_FEATURE_KEYS\n", "_CATEGORICAL_FEATURE_KEYS = census_income_constants.CATEGORICAL_FEATURE_KEYS\n", "_MAX_CATEGORICAL_FEATURE_VALUES = census_income_constants.MAX_CATEGORICAL_FEATURE_VALUES\n", "_LABEL_KEY = census_income_constants.LABEL_KEY\n", "_transformed_name = census_income_constants.transformed_name\n", "\n", "\n", "def _transformed_names(keys):\n", " return [_transformed_name(key) for key in keys]\n", "\n", "\n", "def _gzip_reader_fn(filenames):\n", " \"\"\"Small utility returning a record reader that can read gzip'ed files.\"\"\"\n", " return tf.data.TFRecordDataset(\n", " filenames,\n", " compression_type='GZIP')\n", "\n", "\n", "def _get_serve_tf_examples_fn(model, 
tf_transform_output):\n", " \"\"\"Returns a function that parses a serialized tf.Example and applies TFT.\"\"\"\n", "\n", " model.tft_layer = tf_transform_output.transform_features_layer()\n", "\n", " @tf.function\n", " def serve_tf_examples_fn(serialized_tf_examples):\n", " \"\"\"Returns the output to be used in the serving signature.\"\"\"\n", " feature_spec = tf_transform_output.raw_feature_spec()\n", " feature_spec.pop(_LABEL_KEY)\n", " parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)\n", "\n", " transformed_features = model.tft_layer(parsed_features)\n", " if _transformed_name(_LABEL_KEY) in transformed_features:\n", " transformed_features.pop(_transformed_name(_LABEL_KEY))\n", "\n", " return model(transformed_features)\n", "\n", " return serve_tf_examples_fn\n", "\n", "\n", "def _input_fn(file_pattern: List[Text],\n", " tf_transform_output: tft.TFTransformOutput,\n", " batch_size: int = 200) -> tf.data.Dataset:\n", " \"\"\"Generates features and label for tuning/training.\n", "\n", " Args:\n", " file_pattern: List of paths or patterns of input tfrecord files.\n", " tf_transform_output: A TFTransformOutput.\n", " batch_size: representing the number of consecutive elements of returned\n", " dataset to combine in a single batch\n", "\n", " Returns:\n", " A dataset that contains (features, indices) tuple where features is a\n", " dictionary of Tensors, and indices is a single Tensor of label indices.\n", " \"\"\"\n", " transformed_feature_spec = (\n", " tf_transform_output.transformed_feature_spec().copy())\n", "\n", " dataset = tf.data.experimental.make_batched_features_dataset(\n", " file_pattern=file_pattern,\n", " batch_size=batch_size,\n", " features=transformed_feature_spec,\n", " reader=_gzip_reader_fn,\n", " label_key=_transformed_name(_LABEL_KEY))\n", "\n", " return dataset\n", "\n", "\n", "def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:\n", " \"\"\"Creates a DNN Keras model.\n", "\n", " Args:\n", " hidden_units: [int], the layer sizes of the DNN (input layer first).\n", "\n", " Returns:\n", " A keras Model.\n", " \"\"\"\n", " real_valued_columns = [\n", " tf.feature_column.numeric_column(key, shape=())\n", " for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)\n", " ]\n", " categorical_columns = [\n", " tf.feature_column.categorical_column_with_identity(\n", " key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)\n", " for key in _transformed_names(_VOCAB_FEATURE_KEYS)\n", " ]\n", " categorical_columns += [\n", " tf.feature_column.categorical_column_with_identity(\n", " key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)\n", " for key in _transformed_names(_BUCKET_FEATURE_KEYS)\n", " ]\n", " categorical_columns += [\n", " tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension\n", " key,\n", " num_buckets=num_buckets,\n", " default_value=0) for key, num_buckets in zip(\n", " _transformed_names(_CATEGORICAL_FEATURE_KEYS),\n", " _MAX_CATEGORICAL_FEATURE_VALUES)\n", " ]\n", " indicator_column = [\n", " tf.feature_column.indicator_column(categorical_column)\n", " for categorical_column in categorical_columns\n", " ]\n", "\n", " model = _wide_and_deep_classifier(\n", " # TODO(b/139668410) replace with premade wide_and_deep keras model\n", " wide_columns=indicator_column,\n", " deep_columns=real_valued_columns,\n", " dnn_hidden_units=hidden_units or [100, 70, 50, 25])\n", " return model\n", "\n", "\n", "def _wide_and_deep_classifier(wide_columns, deep_columns, 
dnn_hidden_units):\n", " \"\"\"Build a simple keras wide and deep model.\n", "\n", " Args:\n", " wide_columns: Feature columns wrapped in indicator_column for wide (linear)\n", " part of the model.\n", " deep_columns: Feature columns for deep part of the model.\n", " dnn_hidden_units: [int], the layer sizes of the hidden DNN.\n", "\n", " Returns:\n", " A Wide and Deep Keras model\n", " \"\"\"\n", " # The following values are hard-coded for simplicity in this example;\n", " # preferably, they should be passed in as hparams.\n", "\n", " # Keras needs the feature definitions at compile time.\n", " # TODO(b/139081439): Automate generation of input layers from FeatureColumn.\n", " input_layers = {\n", " colname: tf.keras.layers.Input(name=colname, shape=(), dtype=tf.float32)\n", " for colname in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)\n", " }\n", " input_layers.update({\n", " colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n", " for colname in _transformed_names(_VOCAB_FEATURE_KEYS)\n", " })\n", " input_layers.update({\n", " colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n", " for colname in _transformed_names(_BUCKET_FEATURE_KEYS)\n", " })\n", " input_layers.update({\n", " colname: tf.keras.layers.Input(name=colname, shape=(), dtype='int32')\n", " for colname in _transformed_names(_CATEGORICAL_FEATURE_KEYS)\n", " })\n", "\n", " # TODO(b/161816639): SparseFeatures for feature columns + Keras.\n", " deep = tf.keras.layers.DenseFeatures(deep_columns)(input_layers)\n", " for numnodes in dnn_hidden_units:\n", " deep = tf.keras.layers.Dense(numnodes)(deep)\n", " wide = tf.keras.layers.DenseFeatures(wide_columns)(input_layers)\n", "\n", " output = tf.keras.layers.Dense(\n", " 1, activation='sigmoid')(\n", " tf.keras.layers.concatenate([deep, wide]))\n", "\n", " model = tf.keras.Model(input_layers, output)\n", " model.compile(\n", " loss='binary_crossentropy',\n", " optimizer=tf.keras.optimizers.Adam(lr=0.001),\n", " metrics=[tf.keras.metrics.BinaryAccuracy()])\n", " model.summary(print_fn=absl.logging.info)\n", " return model\n", "\n", "\n", "# TFX Trainer will call this function.\n", "def run_fn(fn_args: TrainerFnArgs):\n", " \"\"\"Train the model based on given args.\n", "\n", " Args:\n", " fn_args: Holds args used to train the model as name/value pairs.\n", " \"\"\"\n", " # Number of nodes in the first layer of the DNN\n", " first_dnn_layer_size = 100\n", " num_dnn_layers = 4\n", " dnn_decay_factor = 0.7\n", "\n", " tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n", "\n", " train_dataset = _input_fn(fn_args.train_files, tf_transform_output, 40)\n", " eval_dataset = _input_fn(fn_args.eval_files, tf_transform_output, 40)\n", "\n", " model = _build_keras_model(\n", " # Construct layer sizes with exponential decay\n", " hidden_units=[\n", " max(2, int(first_dnn_layer_size * dnn_decay_factor**i))\n", " for i in range(num_dnn_layers)\n", " ])\n", "\n", " # This log path might change in the future.\n", " log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n", " tensorboard_callback = tf.keras.callbacks.TensorBoard(\n", " log_dir=log_dir, update_freq='batch')\n", " model.fit(\n", " train_dataset,\n", " steps_per_epoch=fn_args.train_steps,\n", " validation_data=eval_dataset,\n", " validation_steps=fn_args.eval_steps,\n", " callbacks=[tensorboard_callback])\n", "\n", " signatures = {\n", " 'serving_default':\n", " _get_serve_tf_examples_fn(model,\n", " tf_transform_output).get_concrete_function(\n", " 
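# The serving signature accepts a batch of serialized tf.Example protos,\n", " # which serve_tf_examples_fn parses and transforms before calling the model.\n", " 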
tf.TensorSpec(\n", " shape=[None],\n", " dtype=tf.string,\n", " name='examples')),\n", " }\n", " model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)" ] }, { "cell_type": "code", "execution_count": 25, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:11:38.910681Z", "iopub.status.busy": "2022-04-27T09:11:38.910197Z", "iopub.status.idle": "2022-04-27T09:11:53.925707Z", "shell.execute_reply": "2022-04-27T09:11:53.925035Z" }, "id": "429-vvCWibO0" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:`custom_executor_spec` is deprecated. Please customize component directly.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Generating ephemeral wheel package for '/tmpfs/src/temp/model_card_toolkit/documentation/examples/census_income_trainer.py' (including modules: ['census_income_transform', 'census_income_trainer', 'census_income_constants']).\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:User module package has hash fingerprint version aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Executing: ['/tmpfs/src/tf_docs_env/bin/python', '/tmpfs/tmp/tmpwqql_i8g/_tfx_generated_setup.py', 'bdist_wheel', '--bdist-dir', '/tmpfs/tmp/tmpzpmolcsr', '--dist-dir', '/tmpfs/tmp/tmpnkgc7yz1']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "running bdist_wheel\n", "running build\n", "running build_py\n", "creating build\n", "creating build/lib\n", "copying census_income_transform.py -> build/lib\n", "copying census_income_trainer.py -> build/lib\n", "copying census_income_constants.py -> build/lib\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "installing to /tmpfs/tmp/tmpzpmolcsr\n", "running install\n", "running install_lib\n", "copying build/lib/census_income_transform.py -> /tmpfs/tmp/tmpzpmolcsr\n", "copying build/lib/census_income_trainer.py -> /tmpfs/tmp/tmpzpmolcsr\n", "copying build/lib/census_income_constants.py -> /tmpfs/tmp/tmpzpmolcsr\n", "running install_egg_info\n", "running egg_info\n", "creating tfx_user_code_Trainer.egg-info\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "writing tfx_user_code_Trainer.egg-info/PKG-INFO\n", "writing dependency_links to tfx_user_code_Trainer.egg-info/dependency_links.txt\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/tmpfs/src/tf_docs_env/lib/python3.7/site-packages/setuptools/command/install.py:37: SetuptoolsDeprecationWarning: setup.py install is deprecated. 
Use build and pip and other standards-based tools.\n", " setuptools.SetuptoolsDeprecationWarning,\n", "INFO:absl:Successfully built user code wheel distribution at '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'; target user module is 'census_income_trainer'.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Full user module path is 'census_income_trainer@/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running driver for Trainer\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "writing top-level names to tfx_user_code_Trainer.egg-info/top_level.txt\n", "writing manifest file 'tfx_user_code_Trainer.egg-info/SOURCES.txt'\n", "reading manifest file 'tfx_user_code_Trainer.egg-info/SOURCES.txt'\n", "writing manifest file 'tfx_user_code_Trainer.egg-info/SOURCES.txt'\n", "Copying tfx_user_code_Trainer.egg-info to /tmpfs/tmp/tmpzpmolcsr/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3.7.egg-info\n", "running install_scripts\n", "creating /tmpfs/tmp/tmpzpmolcsr/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe.dist-info/WHEEL\n", "creating '/tmpfs/tmp/tmpnkgc7yz1/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl' and adding '/tmpfs/tmp/tmpzpmolcsr' to it\n", "adding 'census_income_constants.py'\n", "adding 'census_income_trainer.py'\n", "adding 'census_income_transform.py'\n", "adding 'tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe.dist-info/METADATA'\n", "adding 'tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe.dist-info/WHEEL'\n", "adding 'tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe.dist-info/top_level.txt'\n", "adding 'tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe.dist-info/RECORD'\n", "removing /tmpfs/tmp/tmpzpmolcsr\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running executor for Trainer\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Train on the 'train' split when train_args.splits is not set.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Evaluate on the 'eval' split when eval_args.splits is not set.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:Examples artifact does not have payload_format custom property. Falling back to FORMAT_TF_EXAMPLE\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:Examples artifact does not have payload_format custom property. Falling back to FORMAT_TF_EXAMPLE\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:Examples artifact does not have payload_format custom property. 
Falling back to FORMAT_TF_EXAMPLE\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:udf_utils.get_fn {'train_args': '{\\n \"num_steps\": 100\\n}', 'eval_args': '{\\n \"num_steps\": 50\\n}', 'module_file': None, 'run_fn': None, 'trainer_fn': None, 'custom_config': 'null', 'module_path': 'census_income_trainer@/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'} 'run_fn'\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Installing '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl' to a temporary directory.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Executing: ['/tmpfs/src/tf_docs_env/bin/python', '-m', 'pip', 'install', '--target', '/tmpfs/tmp/tmpj80rqe5y', '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl']\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Processing /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING: You are using pip version 21.3; however, version 22.0.4 is available.\n", "You should consider upgrading via the '/tmpfs/src/tf_docs_env/bin/python -m pip install --upgrade pip' command.\n", "INFO:absl:Successfully installed '/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/_wheels/tfx_user_code_Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe-py3-none-any.whl'.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Training model.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Installing collected packages: tfx-user-code-Trainer\n", "Successfully installed tfx-user-code-Trainer-0.0+aa1f5233b0e0a112365f42ff9488eec93da45611d414b4dc3c0a3a28e1aaa3fe\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/tmpfs/src/tf_docs_env/lib/python3.7/site-packages/keras/optimizer_v2/adam.py:105: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.\n", " super(Adam, self).__init__(name, **kwargs)\n", "INFO:absl:Model: \"model\"\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:__________________________________________________________________________________________________\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Layer (type) Output Shape Param # Connected to \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:==================================================================================================\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Age_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Capital-Gain_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Capital-Loss_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", 
"output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Country_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Education-Num_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Education_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Hours-per-week_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Marital-Status_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Occupation_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Race_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Relationship_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Sex_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: Workclass_xf (InputLayer) [(None,)] 0 [] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: dense_features (DenseFeatures) (None, 3) 0 ['Age_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Capital-Gain_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Capital-Loss_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Country_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Education-Num_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Education_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Hours-per-week_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Marital-Status_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Occupation_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Race_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Relationship_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Sex_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Workclass_xf[0][0]'] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: dense (Dense) (None, 100) 400 ['dense_features[0][0]'] \n" ] }, { "name": "stderr", 
"output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: dense_1 (Dense) (None, 70) 7070 ['dense[0][0]'] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: dense_2 (Dense) (None, 48) 3408 ['dense_1[0][0]'] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: dense_3 (Dense) (None, 34) 1666 ['dense_2[0][0]'] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: dense_features_1 (DenseFeature (None, 1710) 0 ['Age_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: s) 'Capital-Gain_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Capital-Loss_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Country_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Education-Num_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Education_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Hours-per-week_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Marital-Status_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Occupation_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Race_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Relationship_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Sex_xf[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'Workclass_xf[0][0]'] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: concatenate (Concatenate) (None, 1744) 0 ['dense_3[0][0]', \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: 'dense_features_1[0][0]'] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: dense_4 (Dense) (None, 1) 1745 ['concatenate[0][0]'] \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl: \n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:==================================================================================================\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Total params: 14,289\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Trainable params: 14,289\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Non-trainable params: 0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:__________________________________________________________________________________________________\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\r", " 1/100 [..............................] 
- ETA: 3:37 - loss: 0.6884 - binary_accuracy: 0.5750" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\r", "100/100 [==============================] - 4s 14ms/step - loss: 0.4904 - binary_accuracy: 0.7855 - val_loss: 0.4341 - val_binary_accuracy: 0.8115\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:tensorflow_text is not available.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:tensorflow_decision_forests is not available.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:struct2tensor is not available.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "INFO:tensorflow:Assets written to: /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Trainer/model/5/Format-Serving/assets\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Training complete. Model written to /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Trainer/model/5/Format-Serving. ModelRun written to /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Trainer/model_run/5\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running publisher for Trainer\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "data": { "text/html": [ "\n", "\n", "
<em>ExecutionResult for the Trainer component (execution_id: 5): inputs are the transformed examples, transform graph, and schema; outputs are the 'model' and 'model_run' artifacts. Interactive HTML rendering omitted; see the text/plain output below.</em>\n
" ], "text/plain": [ "ExecutionResult(\n", " component_id: Trainer\n", " execution_id: 5\n", " outputs:\n", " model: Channel(\n", " type_name: Model\n", " artifacts: [Artifact(artifact: id: 12\n", " type_id: 24\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Trainer/model/5\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"model\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Trainer\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 24\n", " name: \"Model\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " model_run: Channel(\n", " type_name: ModelRun\n", " artifacts: [Artifact(artifact: id: 13\n", " type_id: 25\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Trainer/model_run/5\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"model_run\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Trainer\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 25\n", " name: \"ModelRun\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ))" ] }, "execution_count": 25, "metadata": {}, "output_type": "execute_result" } ], "source": [ "trainer = Trainer(\n", " module_file=os.path.abspath(_census_income_trainer_module_file),\n", " custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n", " examples=transform.outputs['transformed_examples'],\n", " transform_graph=transform.outputs['transform_graph'],\n", " schema=schema_gen.outputs['schema'],\n", " train_args=trainer_pb2.TrainArgs(num_steps=100),\n", " eval_args=trainer_pb2.EvalArgs(num_steps=50))\n", "context.run(trainer)" ] }, { "cell_type": "code", "execution_count": 26, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:11:53.929260Z", "iopub.status.busy": "2022-04-27T09:11:53.928719Z", "iopub.status.idle": "2022-04-27T09:11:53.933306Z", "shell.execute_reply": "2022-04-27T09:11:53.932789Z" }, "id": "cSb8fhbQDmyJ" }, "outputs": [ { "data": { "text/plain": [ "{'model': Channel(\n", " type_name: Model\n", " artifacts: [Artifact(artifact: id: 12\n", " type_id: 24\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Trainer/model/5\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"model\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Trainer\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 24\n", " name: \"Model\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ),\n", " 'model_run': Channel(\n", " type_name: ModelRun\n", " artifacts: 
[Artifact(artifact: id: 13\n", " type_id: 25\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Trainer/model_run/5\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"model_run\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Trainer\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 25\n", " name: \"ModelRun\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )}" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "trainer.outputs" ] }, { "cell_type": "markdown", "metadata": { "id": "FmPftrv0lEQy" }, "source": [ "### Evaluator\n", "The `Evaluator` component computes model performance metrics over the evaluation set. It uses the [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) library. \n", "\n", "`Evaluator` will take as input the data from `ExampleGen`, the trained model from `Trainer`, and slicing configuration. The slicing configuration allows you to slice your metrics on feature values. See an example of this configuration below:" ] }, { "cell_type": "code", "execution_count": 27, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:11:53.936531Z", "iopub.status.busy": "2022-04-27T09:11:53.936025Z", "iopub.status.idle": "2022-04-27T09:11:53.941438Z", "shell.execute_reply": "2022-04-27T09:11:53.940937Z" }, "id": "fVhfzzh9PDEx" }, "outputs": [], "source": [ "from google.protobuf.wrappers_pb2 import BoolValue\n", "\n", "eval_config = tfma.EvalConfig(\n", " model_specs=[\n", " # This assumes a serving model with signature 'serving_default'. If\n", " # using estimator based EvalSavedModel, add signature_name: 'eval' and \n", " # remove the label_key.\n", " tfma.ModelSpec(label_key=\"Over-50K\")\n", " ],\n", " metrics_specs=[\n", " tfma.MetricsSpec(\n", " # The metrics added here are in addition to those saved with the\n", " # model (assuming either a keras model or EvalSavedModel is used).\n", " # Any metrics added into the saved model (for example using\n", " # model.compile(..., metrics=[...]), etc) will be computed\n", " # automatically.\n", " # To add validation thresholds for metrics saved with the model,\n", " # add them keyed by metric name to the thresholds map.\n", " metrics=[\n", " tfma.MetricConfig(class_name='ExampleCount'),\n", " tfma.MetricConfig(class_name='BinaryAccuracy'),\n", " tfma.MetricConfig(class_name='FairnessIndicators',\n", " config='{ \"thresholds\": [0.5] }'),\n", " ]\n", " )\n", " ],\n", " slicing_specs=[\n", " # An empty slice spec means the overall slice, i.e. the whole dataset.\n", " tfma.SlicingSpec(),\n", " # Data can be sliced along a feature column. In this case, data is\n", " # sliced by feature column Race and Sex.\n", " tfma.SlicingSpec(feature_keys=['Race']),\n", " tfma.SlicingSpec(feature_keys=['Sex']),\n", " tfma.SlicingSpec(feature_keys=['Race', 'Sex']),\n", " ],\n", " options = tfma.Options(compute_confidence_intervals=BoolValue(value=True))\n", ")" ] }, { "cell_type": "markdown", "metadata": { "id": "QfbptwWQ4k0z" }, "source": [ "Warning: the Evaluator Component may take 5-10 minutes to run due to errors regarding \"inconsistent references\". 
" ] }, { "cell_type": "code", "execution_count": 28, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:11:53.944397Z", "iopub.status.busy": "2022-04-27T09:11:53.943900Z", "iopub.status.idle": "2022-04-27T09:16:57.184273Z", "shell.execute_reply": "2022-04-27T09:16:57.183722Z" }, "id": "Zjcx8g6mihSt" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running driver for Evaluator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running executor for Evaluator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Nonempty beam arg extra_packages already includes dependency\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:udf_utils.get_fn {'eval_config': '{\\n \"metrics_specs\": [\\n {\\n \"metrics\": [\\n {\\n \"class_name\": \"ExampleCount\"\\n },\\n {\\n \"class_name\": \"BinaryAccuracy\"\\n },\\n {\\n \"class_name\": \"FairnessIndicators\",\\n \"config\": \"{ \\\\\"thresholds\\\\\": [0.5] }\"\\n }\\n ]\\n }\\n ],\\n \"model_specs\": [\\n {\\n \"label_key\": \"Over-50K\"\\n }\\n ],\\n \"options\": {\\n \"compute_confidence_intervals\": true\\n },\\n \"slicing_specs\": [\\n {},\\n {\\n \"feature_keys\": [\\n \"Race\"\\n ]\\n },\\n {\\n \"feature_keys\": [\\n \"Sex\"\\n ]\\n },\\n {\\n \"feature_keys\": [\\n \"Race\",\\n \"Sex\"\\n ]\\n }\\n ]\\n}', 'feature_slicing_spec': None, 'fairness_indicator_thresholds': 'null', 'example_splits': 'null', 'module_file': None, 'module_path': None} 'custom_eval_shared_model'\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Request was made to ignore the baseline ModelSpec and any change thresholds. This is likely because a baseline model was not provided: updated_config=\n", "model_specs {\n", " label_key: \"Over-50K\"\n", "}\n", "slicing_specs {\n", "}\n", "slicing_specs {\n", " feature_keys: \"Race\"\n", "}\n", "slicing_specs {\n", " feature_keys: \"Sex\"\n", "}\n", "slicing_specs {\n", " feature_keys: \"Race\"\n", " feature_keys: \"Sex\"\n", "}\n", "metrics_specs {\n", " metrics {\n", " class_name: \"ExampleCount\"\n", " }\n", " metrics {\n", " class_name: \"BinaryAccuracy\"\n", " }\n", " metrics {\n", " class_name: \"FairnessIndicators\"\n", " config: \"{ \\\"thresholds\\\": [0.5] }\"\n", " }\n", "}\n", "options {\n", " compute_confidence_intervals {\n", " value: true\n", " }\n", " confidence_intervals {\n", " method: JACKKNIFE\n", " }\n", "}\n", "\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Using /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Trainer/model/5/Format-Serving as model.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f49ad48c2d0> and ).\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:The 'example_splits' parameter is not set, using 'eval' split.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Evaluating model.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:root:Make sure that locally built Python SDK docker image has Python 3.7 interpreter.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40cc202350> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40cb25bb50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40ca414650> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c943b850> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c8b60290> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c7bb3110> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c6caa4d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c5da5950> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c4e7ccd0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c3f28a50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c2ffcb10> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c206df10> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c118d410> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40c02b8810> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f42238f6510> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f423da6ab10> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4245111550> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f46fc11a050> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4993f132d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f423dca6450> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4644f1ab50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f423fcc5910> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f423c958f50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f422275c650> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4644b46510> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4221afbed0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f414b96dcd0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4222baee10> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4221518410> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f42451d14d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f423f431f50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4223732b50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4220a2e690> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f41f9e9f850> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40becc6b50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40cf148150> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d29e9f10> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d70e50d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40e3232710> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4148316650> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f417846d590> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40e062c290> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4143dd2fd0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40ce91d110> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40cfc7c6d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f414a6f8250> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40cf4c9210> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f414848c750> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40e0b71950> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f414a0e7990> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d0f63250> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d2cdff50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d209af50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d05f7290> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d402d050> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f414ac9e8d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d4112f10> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4101a65cd0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f417942ef50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f414a5457d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4143601a50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4102633610> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40e112a4d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d461cd10> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. 
Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40d0808110> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40bdf44590> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40bd0a99d0> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40bc232090> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f40bb26af50> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f414bf80450> and ).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program.\n", "\n", "Two checkpoint references resolved to different objects (TransformFeaturesLayer object at 0x7f4646281050> and ).\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Evaluation complete. Results written to /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Evaluator/evaluation/6.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:No threshold configured, will not validate model.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running publisher for Evaluator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "data": { "text/html": [ "\n", "\n", "
ExecutionResult for Evaluator (execution_id: 6)
" ], "text/plain": [ "ExecutionResult(\n", " component_id: Evaluator\n", " execution_id: 6\n", " outputs:\n", " evaluation: Channel(\n", " type_name: ModelEvaluation\n", " artifacts: [Artifact(artifact: id: 14\n", " type_id: 27\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Evaluator/evaluation/6\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"evaluation\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Evaluator\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 27\n", " name: \"ModelEvaluation\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )\n", " blessing: Channel(\n", " type_name: ModelBlessing\n", " artifacts: [Artifact(artifact: id: 15\n", " type_id: 28\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Evaluator/blessing/6\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"blessing\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Evaluator\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 28\n", " name: \"ModelBlessing\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ))" ] }, "execution_count": 28, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Use TFMA to compute evaluation statistics over features of a model and\n", "# validate them against a baseline.\n", "\n", "# TODO(b/226656838) Fix the inconsistent references warnings.\n", "evaluator = Evaluator(\n", " examples=example_gen.outputs['examples'],\n", " model=trainer.outputs['model'],\n", " eval_config=eval_config)\n", "context.run(evaluator)" ] }, { "cell_type": "code", "execution_count": 29, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:16:57.187589Z", "iopub.status.busy": "2022-04-27T09:16:57.187181Z", "iopub.status.idle": "2022-04-27T09:16:57.191515Z", "shell.execute_reply": "2022-04-27T09:16:57.191016Z" }, "id": "k4GghePOTJxL" }, "outputs": [ { "data": { "text/plain": [ "{'evaluation': Channel(\n", " type_name: ModelEvaluation\n", " artifacts: [Artifact(artifact: id: 14\n", " type_id: 27\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Evaluator/evaluation/6\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"evaluation\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Evaluator\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 27\n", " name: \"ModelEvaluation\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ),\n", " 'blessing': Channel(\n", " type_name: ModelBlessing\n", " artifacts: [Artifact(artifact: 
id: 15\n", " type_id: 28\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Evaluator/blessing/6\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"blessing\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"Evaluator\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 28\n", " name: \"ModelBlessing\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " )}" ] }, "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ "evaluator.outputs" ] }, { "cell_type": "markdown", "metadata": { "id": "Y5TMskWe9LL0" }, "source": [ "Using the `evaluation` output, we can show the default visualization of global metrics on the entire evaluation set." ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:16:57.194655Z", "iopub.status.busy": "2022-04-27T09:16:57.194227Z", "iopub.status.idle": "2022-04-27T09:16:57.214348Z", "shell.execute_reply": "2022-04-27T09:16:57.213770Z" }, "id": "U729j5X5QQUQ" }, "outputs": [ { "data": { "text/html": [ "Artifact at /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Evaluator/evaluation/6
" ], "text/plain": [ "<IPython.core.display.HTML object>" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:From /tmpfs/src/tf_docs_env/lib/python3.7/site-packages/tensorflow_model_analysis/writers/metrics_plots_and_validations_writer.py:107: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use eager execution and: \n", "`tf.data.TFRecordDataset(path)`\n" ] } ], "source": [ "context.show(evaluator.outputs['evaluation'])" ] },
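{ "cell_type": "markdown", "metadata": {}, "source": [ "Beyond the aggregate view above, `eval_config` also computed metrics on data slices (`Race`, `Sex`, and their cross). One way to inspect a single slicing column is to load the evaluation artifact directly with TFMA. The next cell is a minimal sketch, assuming the TFMA notebook visualization extensions are installed; `evaluation_uri` is simply a local name for the artifact location." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import tensorflow_model_analysis as tfma\n", "\n", "# Locate the evaluation artifact written by the Evaluator run above and load\n", "# it as a TFMA EvalResult.\n", "evaluation_uri = evaluator.outputs['evaluation'].get()[0].uri\n", "eval_result = tfma.load_eval_result(evaluation_uri)\n", "\n", "# Render the metrics computed on the 'Race' slices declared in eval_config.\n", "tfma.view.render_slicing_metrics(eval_result, slicing_column='Race')" ] },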
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:From /tmpfs/src/tf_docs_env/lib/python3.7/site-packages/tensorflow_model_analysis/writers/metrics_plots_and_validations_writer.py:107: tf_record_iterator (from tensorflow.python.lib.io.tf_record) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use eager execution and: \n", "`tf.data.TFRecordDataset(path)`\n" ] } ], "source": [ "context.show(evaluator.outputs['evaluation'])" ] }, { "cell_type": "markdown", "metadata": { "id": "cdTLb_-8dLcu" }, "source": [ "###Model Card Generator\n", "The `Model Card` component is a [TFX Component](https://www.tensorflow.org/tfx/guide/understanding_tfx_pipelines#component) that generates model cards-- short documentation that provides key information about a machine learning model-- from the StatisticGen outputs, the Evaluator outputs, and a prepared json annotation. Optionally, a pushed model or a template can be provided as well. \n", "\n", "The model cards assets are saved to a ModelCard artifact that can be fetched from the `outputs['model_card]'` property." ] }, { "cell_type": "markdown", "metadata": { "id": "l8LY5bMUJsuM" }, "source": [ "#### Prepare Annotation Json for Model Card\n", "\n", "It is also important to document model information that might be important to downstream users, such as its limitations, intended use cases, trade offs, and ethical considerations. Thus, we will prepare this information in json format to be used in the model card generating step." ] }, { "cell_type": "code", "execution_count": 31, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:16:57.218643Z", "iopub.status.busy": "2022-04-27T09:16:57.218191Z", "iopub.status.idle": "2022-04-27T09:16:57.749440Z", "shell.execute_reply": "2022-04-27T09:16:57.748875Z" }, "id": "SIUiTor4Johj" }, "outputs": [], "source": [ "import json\n", "import model_card_toolkit as mctlib\n", "\n", "model_card_json = {'model_details': {'name': 'Census Income Classifier'}, \n", " 'model_details': {'overview': \n", " 'This is a wide and deep Keras model which aims to classify whether or not '\n", " 'an individual has an income of over $50,000 based on various demographic '\n", " 'features. The model is trained on the UCI Census Income Dataset. This is '\n", " 'not a production model, and this dataset has traditionally only been used '\n", " 'for research purposes. In this Model Card, you can review quantitative '\n", " 'components of the model’s performance and data, as well as information '\n", " 'about the model’s intended uses, limitations, and ethical considerations.'},\n", " 'model_details': {'owners': [{\"name\": \"Model Cards Team\", \"contact\": \"model-cards@google.com\"}]},\n", " 'considerations': {'use_cases':[{\"description\":'This dataset that this model was trained on was originally created to '\n", " 'support the machine learning community in conducting empirical analysis '\n", " 'of ML algorithms. The Adult Data Set can be used in fairness-related '\n", " 'studies that compare inequalities across sex and race, based on '\n", " 'people’s annual incomes.'}]},\n", " 'considerations': {'limitations': [{'description':\n", " 'This is a class-imbalanced dataset across a variety of sensitive classes.'\n", " ' The ratio of male-to-female examples is about 2:1 and there are far more'\n", " ' examples with the “white” attribute than every other race combined. 
'\n", " 'Furthermore, the ratio of $50,000 or less earners to $50,000 or more '\n", " 'earners is just over 3:1. Due to the imbalance across income levels, we '\n", " 'can see that our true negative rate seems quite high, while our true '\n", " 'positive rate seems quite low. This is true to an even greater degree '\n", " 'when we only look at the “female” sub-group, because there are even '\n", " 'fewer female examples in the $50,000+ earner group, causing our model to '\n", " 'overfit these examples. To avoid this, we can try various remediation '\n", " 'strategies in future iterations (e.g. undersampling, hyperparameter '\n", " 'tuning, etc), but we may not be able to fix all of the fairness issues.'}]}, \n", " 'considerations': {'ethical_considerations': [\n", " {'name': 'We risk expressing the viewpoint that the attributes in this dataset '\n", " 'are the only ones that are predictive of someone’s income, even '\n", " 'though we know this is not the case.', \n", " 'mitigation_strategy': 'As mentioned, some interventions may need to be '\n", " 'performed to address the class imbalances in the dataset.'}]}\n", " }" ] }, { "cell_type": "markdown", "metadata": { "id": "SOYofSZKOMZx" }, "source": [ "#### Generate the Model Card.\n" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:16:57.753146Z", "iopub.status.busy": "2022-04-27T09:16:57.752747Z", "iopub.status.idle": "2022-04-27T09:17:05.868427Z", "shell.execute_reply": "2022-04-27T09:17:05.867840Z" }, "id": "bspjHq6u5aFf" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running driver for ModelCardGenerator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running executor for ModelCardGenerator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:EvalResult found at path /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Evaluator/evaluation/6\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Reading stats artifact from Split-train\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Reading stats artifact from Split-eval\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running publisher for ModelCardGenerator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "data": { "text/html": [ "\n", "\n", "
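{ "cell_type": "markdown", "metadata": {}, "source": [ "Before handing the annotation to the model card generation step, it can help to preview exactly what will be serialized. The next cell is a minimal sanity check using only the standard library; the asserted keys are simply the ones written above." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Preview the annotation that will be serialized for the model card.\n", "# Python dict literals silently drop repeated keys, so confirm that the\n", "# fields written above are all present.\n", "assert 'name' in model_card_json['model_details']\n", "assert 'owners' in model_card_json['model_details']\n", "assert 'limitations' in model_card_json['considerations']\n", "\n", "print(json.dumps(model_card_json, indent=2)[:400])" ] },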
{ "cell_type": "markdown", "metadata": { "id": "SOYofSZKOMZx" }, "source": [ "#### Generate the Model Card\n" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:16:57.753146Z", "iopub.status.busy": "2022-04-27T09:16:57.752747Z", "iopub.status.idle": "2022-04-27T09:17:05.868427Z", "shell.execute_reply": "2022-04-27T09:17:05.867840Z" }, "id": "bspjHq6u5aFf" }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running driver for ModelCardGenerator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running executor for ModelCardGenerator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:EvalResult found at path /tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/Evaluator/evaluation/6\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Reading stats artifact from Split-train\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Reading stats artifact from Split-eval\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:Running publisher for ModelCardGenerator\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "INFO:absl:MetadataStore with DB connection initialized\n" ] }, { "data": { "text/html": [ "\n", "\n", "ExecutionResult for ModelCardGenerator (execution_id: 7)
" ], "text/plain": [ "ExecutionResult(\n", " component_id: ModelCardGenerator\n", " execution_id: 7\n", " outputs:\n", " model_card: Channel(\n", " type_name: ModelCard\n", " artifacts: [Artifact(artifact: id: 16\n", " type_id: 30\n", " uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/ModelCardGenerator/model_card/7\"\n", " custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"model_card\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"ModelCardGenerator\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", " }\n", " custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", " }\n", " state: LIVE\n", " , artifact_type: id: 30\n", " name: \"ModelCard\"\n", " )]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", " ))" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from model_card_toolkit.tfx.component import ModelCardGenerator\n", "\n", "mct_gen = ModelCardGenerator(statistics=statistics_gen.outputs['statistics'],\n", " evaluation=evaluator.outputs['evaluation'],\n", " json=json.dumps(model_card_json))\n", "context.run(mct_gen)\n" ] }, { "cell_type": "code", "execution_count": 33, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:17:05.871722Z", "iopub.status.busy": "2022-04-27T09:17:05.871366Z", "iopub.status.idle": "2022-04-27T09:17:05.876502Z", "shell.execute_reply": "2022-04-27T09:17:05.875920Z" }, "id": "euskJ-0qM6nV" }, "outputs": [ { "data": { "text/html": [ "\n", "\n", "
"(HTML rendering of the ModelCard output channel omitted; an equivalent plain-text rendering follows.)\n
" ], "text/plain": [ "Channel(\n", " type_name: ModelCard\n", " artifacts: [Artifact(artifact: id: 16\n", "type_id: 30\n", "uri: \"/tmpfs/tmp/tfx-Census Income Classification Pipeline-9ikbb3ld/ModelCardGenerator/model_card/7\"\n", "custom_properties {\n", " key: \"name\"\n", " value {\n", " string_value: \"model_card\"\n", " }\n", "}\n", "custom_properties {\n", " key: \"producer_component\"\n", " value {\n", " string_value: \"ModelCardGenerator\"\n", " }\n", "}\n", "custom_properties {\n", " key: \"state\"\n", " value {\n", " string_value: \"published\"\n", " }\n", "}\n", "custom_properties {\n", " key: \"tfx_version\"\n", " value {\n", " string_value: \"1.5.0\"\n", " }\n", "}\n", "state: LIVE\n", ", artifact_type: id: 30\n", "name: \"ModelCard\"\n", ")]\n", " additional_properties: {}\n", " additional_custom_properties: {}\n", ")" ] }, "execution_count": 33, "metadata": {}, "output_type": "execute_result" } ], "source": [ "mct_gen.outputs['model_card']" ] }, { "cell_type": "markdown", "metadata": { "id": "0kj1szHNdqrX" }, "source": [ "##Display Model Card\n", "\n", "Lastly, we isolate the uri from the model card generator artifact and use it to display the model card." ] }, { "cell_type": "code", "execution_count": 34, "metadata": { "execution": { "iopub.execute_input": "2022-04-27T09:17:05.879611Z", "iopub.status.busy": "2022-04-27T09:17:05.879134Z", "iopub.status.idle": "2022-04-27T09:17:05.911197Z", "shell.execute_reply": "2022-04-27T09:17:05.910660Z" }, "id": "Sd68Ih928vr9" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['template', 'model_cards', 'data']\n" ] }, { "data": { "text/html": [ "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", "\n", " Model Card for None\n", "\n", "\n", "\n", "

\n", " Model Card for None\n", "

\n", "
\n", "
\n", "

Model Details

\n", " \n", " \n", " \n", "

Owners

\n", " \n", " Model Cards Team, model-cards@google.com\n", " \n", " \n", " \n", " \n", " \n", "
\n", " \n", " \n", "
\n", "

Considerations

\n", " \n", " \n", " \n", " \n", " \n", "

Ethical Considerations

\n", "
    \n", "
  • \n", "
    Risk: We risk expressing the viewpoint that the attributes in this dataset are the only ones that are predictive of someone’s income, even though we know this is not the case.
    \n", "
    Mitigation Strategy: As mentioned, some interventions may need to be performed to address the class imbalances in the dataset.
    \n", "
\n", "
\n", " \n", "
\n", " \n", " \n", "
\n", "

"<h2>Datasets</h2>\n",
"<p>(Value-count bar charts omitted; one panel per dataset split (train and eval), covering the features: Age, Capital-Gain, Capital-Loss, Country, Education, Education-Num, Hours-per-week, Marital-Status, Occupation, Over-50K, Race, Relationship, Sex, Workclass, and fnlwgt.)</p>\n",
\n", "

"<h2>Quantitative Analysis</h2>\n",
"<h3>Performance Metrics</h3>\n",
"<table>\n",
"<tr><th>Name</th><th>Value</th></tr>\n",
\n", "binary_accuracy, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "0.927536231884058 (None, None)\n", "
\n", "loss, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "0.318501740694046 (None, None)\n", "
\n", "example_count, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "69.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "0.625 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "0.375 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "1.0 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "0.043478260869565216 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "0.9565217391304348 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Male\n", "\n", "0.07575757575757576 (None, None)\n", "
\n", "binary_accuracy, Sex_ Male\n", "\n", "0.7583845723868082 (None, None)\n", "
\n", "loss, Sex_ Male\n", "\n", "0.510524570941925 (None, None)\n", "
\n", "example_count, Sex_ Male\n", "\n", "7156.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Sex_ Male\n", "\n", "0.027010683329973795 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Sex_ Male\n", "\n", "0.7266514806378133 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Sex_ Male\n", "\n", "0.2733485193621868 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Sex_ Male\n", "\n", "0.9729893166700262 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Sex_ Male\n", "\n", "0.10257126886528788 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Sex_ Male\n", "\n", "0.8974287311347121 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Sex_ Male\n", "\n", "0.18256130790190736 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Sex_ Male\n", "\n", "0.24836499532855807 (None, None)\n", "
\n", "binary_accuracy\n", "\n", "0.8080779944289693 (None, None)\n", "
\n", "loss\n", "\n", "0.43835365772247314 (None, None)\n", "
\n", "example_count\n", "\n", "10770.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5\n", "\n", "0.019951040391676868 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5\n", "\n", "0.7323076923076923 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5\n", "\n", "0.2676923076923077 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5\n", "\n", "0.9800489596083232 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5\n", "\n", "0.0797585886722377 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5\n", "\n", "0.9202414113277623 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5\n", "\n", "0.18975552968568102 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5\n", "\n", "0.19210977701543738 (None, None)\n", "
\n", "binary_accuracy, Race_ White\n", "\n", "0.7976319791440365 (None, None)\n", "
\n", "loss, Race_ White\n", "\n", "0.4528079330921173 (None, None)\n", "
\n", "example_count, Race_ White\n", "\n", "9206.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ White\n", "\n", "0.021437946623887996 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ White\n", "\n", "0.7305236270753512 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ White\n", "\n", "0.26947637292464877 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ White\n", "\n", "0.978562053376112 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ White\n", "\n", "0.08472735172713448 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ White\n", "\n", "0.9152726482728655 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ White\n", "\n", "0.18846153846153846 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ White\n", "\n", "0.20365535248041775 (None, None)\n", "
\n", "binary_accuracy, Race_ White_X_Sex_ Male\n", "\n", "0.7501590330788804 (None, None)\n", "
\n", "loss, Race_ White_X_Sex_ Male\n", "\n", "0.5205867886543274 (None, None)\n", "
\n", "example_count, Race_ White_X_Sex_ Male\n", "\n", "6288.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ White_X_Sex_ Male\n", "\n", "0.028211704359990673 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ White_X_Sex_ Male\n", "\n", "0.7253626813406704 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ White_X_Sex_ Male\n", "\n", "0.27463731865932967 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ White_X_Sex_ Male\n", "\n", "0.9717882956400093 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ White_X_Sex_ Male\n", "\n", "0.10655216284987278 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ White_X_Sex_ Male\n", "\n", "0.8934478371501272 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ White_X_Sex_ Male\n", "\n", "0.18059701492537314 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ White_X_Sex_ Male\n", "\n", "0.2580989676041296 (None, None)\n", "
\n", "binary_accuracy, Race_ White_X_Sex_ Female\n", "\n", "0.8999314599040439 (None, None)\n", "
\n", "loss, Race_ White_X_Sex_ Female\n", "\n", "0.3067510724067688 (None, None)\n", "
\n", "example_count, Race_ White_X_Sex_ Female\n", "\n", "2918.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ White_X_Sex_ Female\n", "\n", "0.010124610591900311 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ White_X_Sex_ Female\n", "\n", "0.76 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ White_X_Sex_ Female\n", "\n", "0.24 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ White_X_Sex_ Female\n", "\n", "0.9898753894080997 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ White_X_Sex_ Female\n", "\n", "0.03769705277587389 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ White_X_Sex_ Female\n", "\n", "0.9623029472241261 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ White_X_Sex_ Female\n", "\n", "0.23636363636363636 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ White_X_Sex_ Female\n", "\n", "0.09472934472934473 (None, None)\n", "
\n", "binary_accuracy, Race_ Black_X_Sex_ Male\n", "\n", "0.8333333333333334 (None, None)\n", "
\n", "loss, Race_ Black_X_Sex_ Male\n", "\n", "0.42823582887649536 (None, None)\n", "
\n", "example_count, Race_ Black_X_Sex_ Male\n", "\n", "522.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Black_X_Sex_ Male\n", "\n", "0.012077294685990338 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Black_X_Sex_ Male\n", "\n", "0.7592592592592593 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Black_X_Sex_ Male\n", "\n", "0.24074074074074073 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Black_X_Sex_ Male\n", "\n", "0.9879227053140096 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Black_X_Sex_ Male\n", "\n", "0.05938697318007663 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Black_X_Sex_ Male\n", "\n", "0.9406130268199234 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Black_X_Sex_ Male\n", "\n", "0.16129032258064516 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Black_X_Sex_ Male\n", "\n", "0.1670061099796334 (None, None)\n", "
\n", "binary_accuracy, Race_ Other\n", "\n", "0.9340659340659341 (None, None)\n", "
\n", "loss, Race_ Other\n", "\n", "0.26453760266304016 (None, None)\n", "
\n", "example_count, Race_ Other\n", "\n", "91.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Other\n", "\n", "0.012195121951219513 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Other\n", "\n", "0.5555555555555556 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Other\n", "\n", "0.4444444444444444 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Other\n", "\n", "0.9878048780487805 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Other\n", "\n", "0.054945054945054944 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Other\n", "\n", "0.945054945054945 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Other\n", "\n", "0.2 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Other\n", "\n", "0.05813953488372093 (None, None)\n", "
\n", "binary_accuracy, Sex_ Female\n", "\n", "0.9064748201438849 (None, None)\n", "
\n", "loss, Sex_ Female\n", "\n", "0.2954496145248413 (None, None)\n", "
\n", "example_count, Sex_ Female\n", "\n", "3614.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Sex_ Female\n", "\n", "0.009037083203490184 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Sex_ Female\n", "\n", "0.762962962962963 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Sex_ Female\n", "\n", "0.23703703703703705 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Sex_ Female\n", "\n", "0.9909629167965098 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Sex_ Female\n", "\n", "0.034587714443829555 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Sex_ Female\n", "\n", "0.9654122855561704 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Sex_ Female\n", "\n", "0.232 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Sex_ Female\n", "\n", "0.0885640584694755 (None, None)\n", "
\n", "binary_accuracy, Race_ Black_X_Sex_ Female\n", "\n", "0.944 (None, None)\n", "
\n", "loss, Race_ Black_X_Sex_ Female\n", "\n", "0.22781124711036682 (None, None)\n", "
\n", "example_count, Race_ Black_X_Sex_ Female\n", "\n", "500.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Black_X_Sex_ Female\n", "\n", "0.004282655246252677 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Black_X_Sex_ Female\n", "\n", "0.7878787878787878 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Black_X_Sex_ Female\n", "\n", "0.21212121212121213 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Black_X_Sex_ Female\n", "\n", "0.9957173447537473 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Black_X_Sex_ Female\n", "\n", "0.018 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Black_X_Sex_ Female\n", "\n", "0.982 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Black_X_Sex_ Female\n", "\n", "0.2222222222222222 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Black_X_Sex_ Female\n", "\n", "0.05295315682281059 (None, None)\n", "
\n", "binary_accuracy, Race_ Other_X_Sex_ Female\n", "\n", "0.9722222222222222 (None, None)\n", "
\n", "loss, Race_ Other_X_Sex_ Female\n", "\n", "0.18923574686050415 (None, None)\n", "
\n", "example_count, Race_ Other_X_Sex_ Female\n", "\n", "36.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Other_X_Sex_ Female\n", "\n", "0.3333333333333333 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Other_X_Sex_ Female\n", "\n", "0.6666666666666666 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Other_X_Sex_ Female\n", "\n", "1.0 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Other_X_Sex_ Female\n", "\n", "0.05555555555555555 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Other_X_Sex_ Female\n", "\n", "0.9444444444444444 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Other_X_Sex_ Female\n", "\n", "0.029411764705882353 (None, None)\n", "
\n", "binary_accuracy, Race_ Asian-Pac-Islander\n", "\n", "0.7877906976744186 (None, None)\n", "
\n", "loss, Race_ Asian-Pac-Islander\n", "\n", "0.45436933636665344 (None, None)\n", "
\n", "example_count, Race_ Asian-Pac-Islander\n", "\n", "344.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Asian-Pac-Islander\n", "\n", "0.027450980392156862 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Asian-Pac-Islander\n", "\n", "0.7415730337078652 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Asian-Pac-Islander\n", "\n", "0.25842696629213485 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Asian-Pac-Islander\n", "\n", "0.9725490196078431 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Asian-Pac-Islander\n", "\n", "0.0872093023255814 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Asian-Pac-Islander\n", "\n", "0.9127906976744186 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Asian-Pac-Islander\n", "\n", "0.23333333333333334 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Asian-Pac-Islander\n", "\n", "0.21019108280254778 (None, None)\n", "
\n", "binary_accuracy, Race_ Black\n", "\n", "0.8874755381604696 (None, None)\n", "
\n", "loss, Race_ Black\n", "\n", "0.3301807641983032 (None, None)\n", "
\n", "example_count, Race_ Black\n", "\n", "1022.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Black\n", "\n", "0.007945516458569807 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Black\n", "\n", "0.7659574468085106 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Black\n", "\n", "0.23404255319148937 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Black\n", "\n", "0.9920544835414302 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Black\n", "\n", "0.03913894324853229 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Black\n", "\n", "0.9608610567514677 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Black\n", "\n", "0.175 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Black\n", "\n", "0.109979633401222 (None, None)\n", "
\n", "binary_accuracy, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.7252252252252253 (None, None)\n", "
\n", "loss, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.5274245142936707 (None, None)\n", "
\n", "example_count, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "222.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.0472972972972973 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.7297297297297297 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.2702702702702703 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.9527027027027027 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.12162162162162163 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.8783783783783784 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.25925925925925924 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Male\n", "\n", "0.27692307692307694 (None, None)\n", "
\n", "binary_accuracy, Race_ Other_X_Sex_ Male\n", "\n", "0.9090909090909091 (None, None)\n", "
\n", "loss, Race_ Other_X_Sex_ Male\n", "\n", "0.3138260245323181 (None, None)\n", "
\n", "example_count, Race_ Other_X_Sex_ Male\n", "\n", "55.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Other_X_Sex_ Male\n", "\n", "0.02040816326530612 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Other_X_Sex_ Male\n", "\n", "0.6666666666666666 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Other_X_Sex_ Male\n", "\n", "0.3333333333333333 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Other_X_Sex_ Male\n", "\n", "0.9795918367346939 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Other_X_Sex_ Male\n", "\n", "0.05454545454545454 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Other_X_Sex_ Male\n", "\n", "0.9454545454545454 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Other_X_Sex_ Male\n", "\n", "0.3333333333333333 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Other_X_Sex_ Male\n", "\n", "0.07692307692307693 (None, None)\n", "
\n", "binary_accuracy, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "0.868421052631579 (None, None)\n", "
\n", "loss, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "0.3347950577735901 (None, None)\n", "
\n", "example_count, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "38.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "0.029411764705882353 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "1.0 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "0.9705882352941176 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "0.02631578947368421 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "0.9736842105263158 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "1.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Amer-Indian-Eskimo_X_Sex_ Female\n", "\n", "0.10810810810810811 (None, None)\n", "
\n", "binary_accuracy, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "0.9016393442622951 (None, None)\n", "
\n", "loss, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "0.3214329183101654 (None, None)\n", "
\n", "example_count, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "122.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "0.8 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "0.2 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "1.0 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "0.02459016393442623 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "0.9754098360655737 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Asian-Pac-Islander_X_Sex_ Female\n", "\n", "0.10084033613445378 (None, None)\n", "
\n", "binary_accuracy, Race_ Amer-Indian-Eskimo\n", "\n", "0.9065420560747663 (None, None)\n", "
\n", "loss, Race_ Amer-Indian-Eskimo\n", "\n", "0.32428810000419617 (None, None)\n", "
\n", "example_count, Race_ Amer-Indian-Eskimo\n", "\n", "107.0 (None, None)\n", "
\n", "fairness_indicators_metrics/false_positive_rate@0.5, Race_ Amer-Indian-Eskimo\n", "\n", "0.010526315789473684 (None, None)\n", "
\n", "fairness_indicators_metrics/false_negative_rate@0.5, Race_ Amer-Indian-Eskimo\n", "\n", "0.75 (None, None)\n", "
\n", "fairness_indicators_metrics/true_positive_rate@0.5, Race_ Amer-Indian-Eskimo\n", "\n", "0.25 (None, None)\n", "
\n", "fairness_indicators_metrics/true_negative_rate@0.5, Race_ Amer-Indian-Eskimo\n", "\n", "0.9894736842105263 (None, None)\n", "
\n", "fairness_indicators_metrics/positive_rate@0.5, Race_ Amer-Indian-Eskimo\n", "\n", "0.037383177570093455 (None, None)\n", "
\n", "fairness_indicators_metrics/negative_rate@0.5, Race_ Amer-Indian-Eskimo\n", "\n", "0.9626168224299065 (None, None)\n", "
\n", "fairness_indicators_metrics/false_discovery_rate@0.5, Race_ Amer-Indian-Eskimo\n", "\n", "0.25 (None, None)\n", "
\n", "fairness_indicators_metrics/false_omission_rate@0.5, Race_ Amer-Indian-Eskimo\n", "\n", "0.08737864077669903 (None, None)\n", "
\n", "\n", " \n", " \n", " \n", "
\n", "
\n", " None\n", " \n", "
\n", " \n", " \n", "
\n", " fairness_indicators_metrics/false_discovery_rate@0.5 | Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_discovery_rate@0.5 | Race, Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_discovery_rate@0.5 | Race\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_positive_rate@0.5 | Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_positive_rate@0.5 | Race, Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_positive_rate@0.5 | Race\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_negative_rate@0.5 | Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_negative_rate@0.5 | Race, Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_negative_rate@0.5 | Race\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/negative_rate@0.5 | Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/negative_rate@0.5 | Race, Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/negative_rate@0.5 | Race\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/positive_rate@0.5 | Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/positive_rate@0.5 | Race, Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/positive_rate@0.5 | Race\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_omission_rate@0.5 | Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_omission_rate@0.5 | Race, Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/false_omission_rate@0.5 | Race\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/true_positive_rate@0.5 | Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/true_positive_rate@0.5 | Race, Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/true_positive_rate@0.5 | Race\n", "
\n", " \n", "
\n", " binary_accuracy | Sex\n", "
\n", " \n", "
\n", " binary_accuracy | Race, Sex\n", "
\n", " \n", "
\n", " binary_accuracy | Race\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/true_negative_rate@0.5 | Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/true_negative_rate@0.5 | Race, Sex\n", "
\n", " \n", "
\n", " fairness_indicators_metrics/true_negative_rate@0.5 | Race\n", "
\n", " \n", "
\n", " loss | Sex\n", "
\n", " \n", "
\n", " loss | Race, Sex\n", "
\n", " \n", "
\n", " loss | Race\n", "
\n", " \n", "
\n", " example_count | Sex\n", "
\n", " \n", "
\n", " example_count | Race, Sex\n", "
\n", " \n", "
\n", " example_count | Race\n", "
\n", " \n", "
\n", "\n", "
\n", "
\n", "\n", " \n", "
\n", "\n", " \n", "\n", "" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "from IPython import display\n", "\n", "mct_artifact = mct_gen.outputs['model_card'].get()[0]\n", "mct_uri = mct_artifact.uri\n", "\n", "print(os.listdir(mct_uri))\n", "\n", "mct_path = os.path.join(mct_uri, 'model_cards', 'model_card.html')\n", "with open(mct_path) as f:\n", " mct_content = f.read()\n", "\n", "\n", "display.display(display.HTML(mct_content))" ] } ], "metadata": { "accelerator": "GPU", "colab": { "collapsed_sections": [], "name": "MLMD Model Card Toolkit Demo.ipynb", "private_outputs": true, "provenance": [], "toc_visible": true }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.13" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "state": { "83af4896b22348e6bc7f008a371a911e": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "b05013d6449043baacf3d8a9bfa876b7": { "model_module": "tensorflow_model_analysis", "model_module_version": "0.36.0", "model_name": "SlicingMetricsModel", "state": { "_dom_classes": [], "_model_module": "tensorflow_model_analysis", "_model_module_version": "0.36.0", "_model_name": "SlicingMetricsModel", "_view_count": null, "_view_module": "tensorflow_model_analysis", "_view_module_version": "0.36.0", "_view_name": "SlicingMetricsView", "config": { "weightedExamplesColumn": "example_count" }, "data": [ { "metrics": { "": { "": { "binary_accuracy": { "boundedValue": { "lowerBound": 0.8001110432149915, "upperBound": 0.8160458061253159, "value": 0.8080779944289693 } }, "example_count": { "doubleValue": 10770.0 }, "fairness_indicators_metrics/false_discovery_rate@0.5": { "boundedValue": { "lowerBound": 0.16026537832155766, "upperBound": 0.21924545262391723, "value": 0.18975552968568102 } }, "fairness_indicators_metrics/false_negative_rate@0.5": { "boundedValue": { "lowerBound": 0.7057978694752957, "upperBound": 0.7588176389541731, "value": 0.7323076923076923 } }, "fairness_indicators_metrics/false_omission_rate@0.5": { "boundedValue": { "lowerBound": 0.18325558726043828, "upperBound": 0.20096292718150172, "value": 0.19210977701543738 } }, "fairness_indicators_metrics/false_positive_rate@0.5": { "boundedValue": { "lowerBound": 0.01643652028705217, 
"upperBound": 0.02346485922418594, "value": 0.019951040391676868 } }, "fairness_indicators_metrics/negative_rate@0.5": { "boundedValue": { "lowerBound": 0.9122209516352575, "upperBound": 0.9282616744392354, "value": 0.9202414113277623 } }, "fairness_indicators_metrics/positive_rate@0.5": { "boundedValue": { "lowerBound": 0.07173832556076483, "upperBound": 0.08777904836474208, "value": 0.0797585886722377 } }, "fairness_indicators_metrics/true_negative_rate@0.5": { "boundedValue": { "lowerBound": 0.9765351407758143, "upperBound": 0.9835634797129476, "value": 0.9800489596083232 } }, "fairness_indicators_metrics/true_positive_rate@0.5": { "boundedValue": { "lowerBound": 0.24118236104582708, "upperBound": 0.2942021305247044, "value": 0.2676923076923077 } }, "loss": { "boundedValue": { "lowerBound": 0.4273900745432548, "upperBound": 0.44931633491109096, "value": 0.43835365772247314 } } } } }, "slice": "Overall" } ], "js_events": [], "layout": "IPY_MODEL_83af4896b22348e6bc7f008a371a911e" } } }, "version_major": 2, "version_minor": 0 } } }, "nbformat": 4, "nbformat_minor": 0 }