I am trying to read a csv file with pyspark using this code:

tr_df = spark.read.csv("/data/file.csv", header=True, inferSchema=True)
tr_df.head(5)

but I get this error:

ValueError                                Traceback (most recent call last)
<ipython-input-53-03432bbf269d> in <module>
----> 1 tr_df.head(5)

~/anaconda3/envs/naboo-env/lib/python3.6/site-packages/pyspark/sql/dataframe.py in head(self, n)
   1250             rs = self.head(1)
   1251             return rs[0] if rs else None
-> 1252         return self.take(n)
   1253
   1254     @ignore_unicode_prefix

~/anaconda3/envs/naboo-env/lib/python3.6/site-packages/pyspark/sql/dataframe.py in take(self, num)
    569         [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
    570         """
--> 571         return self.limit(num).collect()
    572
    573     @since(1.3)

~/anaconda3/envs/naboo-env/lib/python3.6/site-packages/pyspark/sql/dataframe.py in collect(self)
    532         with SCCallSiteSync(self._sc) as css:
    533             sock_info = self._jdf.collectToPython()
--> 534         return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
    535
    536     @ignore_unicode_prefix

~/anaconda3/envs/naboo-env/lib/python3.6/site-packages/pyspark/serializers.py in load_stream(self, stream)
    145         while True:
    146             try:
--> 147                 yield self._read_with_length(stream)
    148             except EOFError:
    149                 return

~/anaconda3/envs/naboo-env/lib/python3.6/site-packages/pyspark/serializers.py in _read_with_length(self, stream)
    170         if len(obj) < length:
    171             raise EOFError
--> 172         return self.loads(obj)
    173
    174     def dumps(self, obj):

~/anaconda3/envs/naboo-env/lib/python3.6/site-packages/pyspark/serializers.py in loads(self, obj, encoding)
    578     if sys.version >= '3':
    579         def loads(self, obj, encoding="bytes"):
--> 580             return pickle.loads(obj, encoding=encoding)
    581     else:
    582         def loads(self, obj, encoding=None):

Can anyone help me with this issue?
1 Answer
慕尼黑的夜晚无繁华
It seems there is a problem with the data type in one of your columns, which is why the error is thrown. Drop the inferSchema=True option when reading. After reading the data, analyze the data types, make any corrections needed, and then apply your own schema.
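A minimal sketch of that approach, using the file path from the question; the column names and types in the explicit schema are placeholders you would replace with your own:

from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType, StringType

spark = SparkSession.builder.getOrCreate()

# 1. Read without inferSchema: every column comes back as StringType,
#    so no malformed value can break type inference.
raw_df = spark.read.csv("/data/file.csv", header=True)
raw_df.printSchema()   # all columns are strings here
raw_df.show(5)         # inspect the values to decide the real types

# 2. Once the real types are known, re-read with an explicit schema.
#    These column names/types are examples only -- replace them with yours.
schema = StructType([
    StructField("id", IntegerType(), True),
    StructField("amount", DoubleType(), True),
    StructField("label", StringType(), True),
])
tr_df = spark.read.csv("/data/file.csv", header=True, schema=schema)
tr_df.head(5)

Reading everything as strings first lets you spot the rows that confused type inference; the explicit schema then makes the types deterministic on the second read.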