
Commit 5556cfc

LuciferYang authored and srowen committed
[SPARK-39298][CORE][SQL][DSTREAM][GRAPHX][ML][MLLIB][SS][YARN] Replace constructing ranges of collection indices manually with .indices
### What changes were proposed in this pull request?

This PR is a trivial change: use `Seq.indices` instead of constructing ranges of collection indices manually.

**Before**

```scala
var x: Seq[Int]
Range(0, x.size)
0 until x.size
0.to(x.size - 1)
```

**After**

```scala
var x: Seq[Int]
x.indices
```

### Why are the changes needed?

Use the existing API instead of manual coding.

### Does this PR introduce _any_ user-facing change?

No

### How was this patch tested?

Pass GitHub Actions.

Closes apache#36679 from LuciferYang/seq-indices.

Authored-by: yangjie01 <[email protected]>
Signed-off-by: Sean Owen <[email protected]>
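For illustration only (not part of the commit message), a minimal, self-contained sketch of the equivalence the change relies on; the `IndicesDemo` object name is made up here:

```scala
// Hypothetical demo: .indices yields the same Range as the manual constructions above.
object IndicesDemo {
  def main(args: Array[String]): Unit = {
    val xs = Seq("a", "b", "c")
    // All of these spell the same range 0, 1, 2; Range equality is element-wise.
    assert(xs.indices == (0 until xs.size))
    assert(xs.indices == Range(0, xs.size))
    assert(xs.indices == 0.to(xs.size - 1))
    xs.indices.foreach(i => println(s"$i -> ${xs(i)}"))
  }
}
```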
1 parent f8c544b commit 5556cfc

49 files changed (+71, -71 lines)


connector/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala (+1, -1)

@@ -2281,7 +2281,7 @@ abstract class KafkaSourceSuiteBase extends KafkaSourceTest {
val headers = row.getList[Row](row.fieldIndex("headers")).asScala
assert(headers.length === expected.length)

-(0 until expected.length).foreach { idx =>
+expected.indices.foreach { idx =>
val key = headers(idx).getAs[String]("key")
val value = headers(idx).getAs[Array[Byte]]("value")
assert(key === expected(idx)._1)

connector/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaDataConsumerSuite.scala (+1, -1)

@@ -133,7 +133,7 @@ class KafkaDataConsumerSuite extends SparkFunSuite with MockitoSugar with Before
val consumer = KafkaDataConsumer.acquire[Array[Byte], Array[Byte]](
topicPartition, kafkaParams, taskContext, useCache)
try {
-val rcvd = (0 until data.length).map { offset =>
+val rcvd = data.indices.map { offset =>
val bytes = consumer.get(offset, 10000).value()
new String(bytes)
}

connector/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisBackedBlockRDDSuite.scala (+1, -1)

@@ -96,7 +96,7 @@ abstract class KinesisBackedBlockRDDTests(aggregateTestData: Boolean)
allRanges.map { range => SequenceNumberRanges(Array(range)) }.toArray
).map { bytes => new String(bytes).toInt }.collectPartitions()
assert(receivedData3.length === allRanges.size)
-for (i <- 0 until allRanges.size) {
+for (i <- allRanges.indices) {
assert(receivedData3(i).toSeq === shardIdToData(allRanges(i).shardId))
}
}

core/src/main/scala/org/apache/spark/MapOutputTracker.scala (+1, -1)

@@ -965,7 +965,7 @@ private[spark] class MapOutputTrackerMaster(
statuses.length.toLong * totalSizes.length / parallelAggThreshold + 1).toInt
if (parallelism <= 1) {
statuses.filter(_ != null).foreach { s =>
-for (i <- 0 until totalSizes.length) {
+for (i <- totalSizes.indices) {
totalSizes(i) += s.getSizeForBlock(i)
}
}

core/src/main/scala/org/apache/spark/SparkContext.scala (+4, -4)

@@ -2277,7 +2277,7 @@ class SparkContext(config: SparkConf) extends Logging {
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
-runJob(rdd, func, 0 until rdd.partitions.length)
+runJob(rdd, func, rdd.partitions.indices)
}

/**
@@ -2289,7 +2289,7 @@ class SparkContext(config: SparkConf) extends Logging {
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
-runJob(rdd, func, 0 until rdd.partitions.length)
+runJob(rdd, func, rdd.partitions.indices)
}

/**
@@ -2304,7 +2304,7 @@ class SparkContext(config: SparkConf) extends Logging {
rdd: RDD[T],
processPartition: (TaskContext, Iterator[T]) => U,
resultHandler: (Int, U) => Unit): Unit = {
-runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
+runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler)
}

/**
@@ -2319,7 +2319,7 @@ class SparkContext(config: SparkConf) extends Logging {
processPartition: Iterator[T] => U,
resultHandler: (Int, U) => Unit): Unit = {
val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
-runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
+runJob[T, U](rdd, processFunc, rdd.partitions.indices, resultHandler)
}

/**

core/src/main/scala/org/apache/spark/deploy/master/Master.scala (+1, -1)

@@ -742,7 +742,7 @@ private[deploy] class Master(
val assignedCores = scheduleExecutorsOnWorkers(app, usableWorkers, spreadOutApps)

// Now that we've decided how many cores to allocate on each worker, let's allocate them
-for (pos <- 0 until usableWorkers.length if assignedCores(pos) > 0) {
+for (pos <- usableWorkers.indices if assignedCores(pos) > 0) {
allocateWorkerResourceToExecutors(
app, assignedCores(pos), app.desc.coresPerExecutor, usableWorkers(pos))
}

core/src/main/scala/org/apache/spark/metrics/ExecutorMetricType.scala (+1, -1)

@@ -208,7 +208,7 @@ private[spark] object ExecutorMetricType {
var numberOfMetrics = 0
val definedMetricsAndOffset = mutable.LinkedHashMap.empty[String, Int]
metricGetters.foreach { m =>
-(0 until m.names.length).foreach { idx =>
+m.names.indices.foreach { idx =>
definedMetricsAndOffset += (m.names(idx) -> (idx + numberOfMetrics))
}
numberOfMetrics += m.names.length

core/src/main/scala/org/apache/spark/rdd/BlockRDD.scala (+1, -1)

@@ -36,7 +36,7 @@ class BlockRDD[T: ClassTag](sc: SparkContext, @transient val blockIds: Array[Blo

override def getPartitions: Array[Partition] = {
assertValid()
-(0 until blockIds.length).map { i =>
+blockIds.indices.map { i =>
new BlockRDDPartition(blockIds(i), i).asInstanceOf[Partition]
}.toArray
}

core/src/main/scala/org/apache/spark/rdd/CoGroupedRDD.scala (+1, -1)

@@ -110,7 +110,7 @@ class CoGroupedRDD[K: ClassTag](

override def getPartitions: Array[Partition] = {
val array = new Array[Partition](part.numPartitions)
-for (i <- 0 until array.length) {
+for (i <- array.indices) {
// Each CoGroupPartition will have a dependency per contributing RDD
array(i) = new CoGroupPartition(i, rdds.zipWithIndex.map { case (rdd, j) =>
// Assume each RDD contributed a single dependency, and get it

core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala (+1, -1)

@@ -156,7 +156,7 @@ class NewHadoopRDD[K, V](
}

val result = new Array[Partition](rawSplits.size)
-for (i <- 0 until rawSplits.size) {
+for (i <- rawSplits.indices) {
result(i) =
new NewHadoopPartition(id, i, rawSplits(i).asInstanceOf[InputSplit with Writable])
}

core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala (+1, -1)

@@ -70,7 +70,7 @@ private[spark] class SubtractedRDD[K: ClassTag, V: ClassTag, W: ClassTag](

override def getPartitions: Array[Partition] = {
val array = new Array[Partition](part.numPartitions)
-for (i <- 0 until array.length) {
+for (i <- array.indices) {
// Each CoGroupPartition will depend on rdd1 and rdd2
array(i) = new CoGroupPartition(i, Seq(rdd1, rdd2).zipWithIndex.map { case (rdd, j) =>
dependencies(j) match {

core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala (+1, -1)

@@ -380,7 +380,7 @@ private[spark] class TaskSchedulerImpl(
var minLaunchedLocality: Option[TaskLocality] = None
// nodes and executors that are excluded for the entire application have already been
// filtered out by this point
-for (i <- 0 until shuffledOffers.size) {
+for (i <- shuffledOffers.indices) {
val execId = shuffledOffers(i).executorId
val host = shuffledOffers(i).host
val taskSetRpID = taskSet.taskSet.resourceProfileId

core/src/main/scala/org/apache/spark/storage/BlockManager.scala (+1, -1)

@@ -2040,7 +2040,7 @@ private[spark] object BlockManager {
}

val blockManagers = new HashMap[BlockId, Seq[String]]
-for (i <- 0 until blockIds.length) {
+for (i <- blockIds.indices) {
blockManagers(blockIds(i)) = blockLocations(i).map { loc =>
ExecutorCacheTaskLocation(loc.host, loc.executorId).toString
}

core/src/main/scala/org/apache/spark/storage/memory/MemoryStore.scala (+1, -1)

@@ -504,7 +504,7 @@ private[spark] class MemoryStore(
try {
logInfo(s"${selectedBlocks.size} blocks selected for dropping " +
s"(${Utils.bytesToString(freedMemory)} bytes)")
-(0 until selectedBlocks.size).foreach { idx =>
+selectedBlocks.indices.foreach { idx =>
val blockId = selectedBlocks(idx)
val entry = entries.synchronized {
entries.get(blockId)

core/src/test/scala/org/apache/spark/BarrierStageOnSubmittedSuite.scala (+1, -1)

@@ -48,7 +48,7 @@ class BarrierStageOnSubmittedSuite extends SparkFunSuite with LocalSparkContext
val futureAction = sc.submitJob(
rdd,
(iter: Iterator[Int]) => iter.toArray,
-partitions.getOrElse(0 until rdd.partitions.length),
+partitions.getOrElse(rdd.partitions.indices),
{ case (_, _) => return }: (Int, Array[Int]) => Unit,
{ return }
)

core/src/test/scala/org/apache/spark/rdd/ParallelCollectionSplitSuite.scala (+2, -2)

@@ -117,7 +117,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val r = ParallelCollectionRDD.slice(1 to 7, 4)
val nr = ParallelCollectionRDD.slice(1L to 7L, 4)
assert(r.size === 4)
-for (i <- 0 until r.size) {
+for (i <- r.indices) {
assert(r(i).size === nr(i).size)
}
}
@@ -126,7 +126,7 @@ class ParallelCollectionSplitSuite extends SparkFunSuite with Checkers {
val r = ParallelCollectionRDD.slice(List(1, 2), 4)
val nr = ParallelCollectionRDD.slice(1L to 2L, 4)
assert(r.size === 4)
-for (i <- 0 until r.size) {
+for (i <- r.indices) {
assert(r(i).size === nr(i).size)
}
}

core/src/test/scala/org/apache/spark/scheduler/AQEShuffledRDD.scala (+1, -1)

@@ -38,7 +38,7 @@ class CoalescedPartitioner(val parent: Partitioner, val partitionStartIndices: A
@transient private lazy val parentPartitionMapping: Array[Int] = {
val n = parent.numPartitions
val result = new Array[Int](n)
-for (i <- 0 until partitionStartIndices.length) {
+for (i <- partitionStartIndices.indices) {
val start = partitionStartIndices(i)
val end = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
for (j <- start until end) {

core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala (+1, -1)

@@ -408,7 +408,7 @@ class CoarseGrainedSchedulerBackendSuite extends SparkFunSuite with LocalSparkCo
sc.submitJob(
rdd,
(iter: Iterator[Int]) => iter.toArray,
-0 until rdd.partitions.length,
+rdd.partitions.indices,
{ case (_, _) => return }: (Int, Array[Int]) => Unit,
{ return }
)

core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala (+1, -1)

@@ -1806,7 +1806,7 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
// now we should submit stage 1, and the map output from stage 0 should be registered

// check that we have all the map output for stage 0
-(0 until reduceRdd.partitions.length).foreach { reduceIdx =>
+reduceRdd.partitions.indices.foreach { reduceIdx =>
val statuses = mapOutputTracker.getMapSizesByExecutorId(0, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK

core/src/test/scala/org/apache/spark/scheduler/OutputCommitCoordinatorSuite.scala (+2, -2)

@@ -139,14 +139,14 @@ class OutputCommitCoordinatorSuite extends SparkFunSuite with BeforeAndAfter {
test("Only one of two duplicate commit tasks should commit") {
val rdd = sc.parallelize(Seq(1), 1)
sc.runJob(rdd, OutputCommitFunctions(tempDir.getAbsolutePath).commitSuccessfully _,
-0 until rdd.partitions.size)
+rdd.partitions.indices)
assert(tempDir.list().size === 1)
}

test("If commit fails, if task is retried it should not be locked, and will succeed.") {
val rdd = sc.parallelize(Seq(1), 1)
sc.runJob(rdd, OutputCommitFunctions(tempDir.getAbsolutePath).failFirstCommitAttempt _,
-0 until rdd.partitions.size)
+rdd.partitions.indices)
assert(tempDir.list().size === 1)
}

core/src/test/scala/org/apache/spark/util/FileAppenderSuite.scala (+2, -2)

@@ -178,7 +178,7 @@ class FileAppenderSuite extends SparkFunSuite with BeforeAndAfter with Logging {
// send data to appender through the input stream, and wait for the data to be written
val allGeneratedFiles = new HashSet[String]()
val items = (1 to 10).map { _.toString * 10000 }
-for (i <- 0 until items.size) {
+for (i <- items.indices) {
testOutputStream.write(items(i).getBytes(StandardCharsets.UTF_8))
testOutputStream.flush()
allGeneratedFiles ++= RollingFileAppender.getSortedRolledOverFiles(
@@ -364,7 +364,7 @@ class FileAppenderSuite extends SparkFunSuite with BeforeAndAfter with Logging {
): Seq[File] = {
// send data to appender through the input stream, and wait for the data to be written
val expectedText = textToAppend.mkString("")
-for (i <- 0 until textToAppend.size) {
+for (i <- textToAppend.indices) {
outputStream.write(textToAppend(i).getBytes(StandardCharsets.UTF_8))
outputStream.flush()
Thread.sleep(sleepTimeBetweenTexts)

core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala (+1, -1)

@@ -771,7 +771,7 @@ private[spark] object JsonProtocolSuite extends Assertions {
assert(info1.submissionTime === info2.submissionTime)
assert(info1.completionTime === info2.completionTime)
assert(info1.rddInfos.size === info2.rddInfos.size)
-(0 until info1.rddInfos.size).foreach { i =>
+info1.rddInfos.indices.foreach { i =>
assertEquals(info1.rddInfos(i), info2.rddInfos(i))
}
assert(info1.accumulables === info2.accumulables)

core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala (+1, -1)

@@ -62,7 +62,7 @@ class PrefixComparatorsSuite extends SparkFunSuite with ScalaCheckPropertyChecks
test("Binary prefix comparator") {

def compareBinary(x: Array[Byte], y: Array[Byte]): Int = {
-for (i <- 0 until x.length; if i < y.length) {
+for (i <- x.indices; if i < y.length) {
val v1 = x(i) & 0xff
val v2 = y(i) & 0xff
val res = v1 - v2

examples/src/main/scala/org/apache/spark/examples/MultiBroadcastTest.scala (+2, -2)

@@ -37,12 +37,12 @@ object MultiBroadcastTest {
val num = if (args.length > 1) args(1).toInt else 1000000

val arr1 = new Array[Int](num)
-for (i <- 0 until arr1.length) {
+for (i <- arr1.indices) {
arr1(i) = i
}

val arr2 = new Array[Int](num)
-for (i <- 0 until arr2.length) {
+for (i <- arr2.indices) {
arr2(i) = i
}

examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala (+1, -1)

@@ -38,7 +38,7 @@ object SparkKMeans {
var bestIndex = 0
var closest = Double.PositiveInfinity

-for (i <- 0 until centers.length) {
+for (i <- centers.indices) {
val tempDist = squaredDistance(p, centers(i))
if (tempDist < closest) {
closest = tempDist

graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala (+1, -1)

@@ -28,7 +28,7 @@ private[graphx]
class VertexAttributeBlock[VD: ClassTag](val vids: Array[VertexId], val attrs: Array[VD])
extends Serializable {
def iterator: Iterator[(VertexId, VD)] =
-(0 until vids.length).iterator.map { i => (vids(i), attrs(i)) }
+vids.indices.iterator.map { i => (vids(i), attrs(i)) }
}

private[graphx]

graphx/src/test/scala/org/apache/spark/graphx/EdgeSuite.scala (+1, -1)

@@ -32,7 +32,7 @@ class EdgeSuite extends SparkFunSuite {
// to ascending order
val sortedEdges = testEdges.sorted(Edge.lexicographicOrdering[Int])

-for (i <- 0 until testEdges.length) {
+for (i <- testEdges.indices) {
assert(sortedEdges(i) == testEdges(testEdges.length - i - 1))
}
}

mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala (+4, -4)

@@ -480,7 +480,7 @@ private[ml] class FeedForwardModel private(
val layers = topology.layers
val layerModels = new Array[LayerModel](layers.length)
private var offset = 0
-for (i <- 0 until layers.length) {
+for (i <- layers.indices) {
layerModels(i) = layers(i).createModel(
new BDV[Double](weights.toArray, offset, 1, layers(i).weightSize))
offset += layers(i).weightSize
@@ -495,7 +495,7 @@ private[ml] class FeedForwardModel private(
if (outputs == null || outputs(0).cols != currentBatchSize) {
outputs = new Array[BDM[Double]](layers.length)
var inputSize = data.rows
-for (i <- 0 until layers.length) {
+for (i <- layers.indices) {
if (layers(i).inPlace) {
outputs(i) = outputs(i - 1)
} else {
@@ -542,7 +542,7 @@ private[ml] class FeedForwardModel private(
}
val cumGradientArray = cumGradient.toArray
var offset = 0
-for (i <- 0 until layerModels.length) {
+for (i <- layerModels.indices) {
val input = if (i == 0) data else outputs(i - 1)
layerModels(i).grad(deltas(i), input,
new BDV[Double](cumGradientArray, offset, 1, layers(i).weightSize))
@@ -601,7 +601,7 @@ private[ann] object FeedForwardModel {
val weights = BDV.zeros[Double](topology.layers.map(_.weightSize).sum)
var offset = 0
val random = new XORShiftRandom(seed)
-for (i <- 0 until layers.length) {
+for (i <- layers.indices) {
layerModels(i) = layers(i).
initModel(new BDV[Double](weights.data, offset, 1, layers(i).weightSize), random)
offset += layers(i).weightSize

mllib/src/main/scala/org/apache/spark/ml/feature/QuantileDiscretizer.scala (+1, -1)

@@ -243,7 +243,7 @@ final class QuantileDiscretizer @Since("1.6.0") (@Since("1.6.0") override val ui
// non-deterministic results when array contains both 0.0 and -0.0
// So that here we should first normalize all 0.0 and -0.0 to be 0.0
// See https://github.com/scala/bug/issues/11995
-for (i <- 0 until splits.length) {
+for (i <- splits.indices) {
if (splits(i) == -0.0) {
splits(i) = 0.0
}
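The comments in this hunk refer to the 0.0 vs -0.0 subtlety. As a minimal sketch (illustration only, not code from this commit), the two values compare equal with `==` yet are distinct bit patterns, which is why the loop above normalizes -0.0 before the splits are used:

```scala
// Illustration only: 0.0 and -0.0 are == but not bit-identical, so
// orderings and lookups based on Double.compare can treat them differently.
object NegativeZeroDemo {
  def main(args: Array[String]): Unit = {
    println(-0.0 == 0.0)                          // true
    println(java.lang.Double.compare(-0.0, 0.0))  // -1: -0.0 sorts before 0.0
    println(1.0 / -0.0)                           // -Infinity
    println(1.0 / 0.0)                            // Infinity
  }
}
```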

mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala (+2, -2)

@@ -367,7 +367,7 @@ class StringIndexerModel (
// This filters out any null values and also the input labels which are not in
// the dataset used for fitting.
private def filterInvalidData(dataset: Dataset[_], inputColNames: Seq[String]): Dataset[_] = {
-val conditions: Seq[Column] = (0 until inputColNames.length).map { i =>
+val conditions: Seq[Column] = inputColNames.indices.map { i =>
val inputColName = inputColNames(i)
val labelToIndex = labelsToIndexArray(i)
// We have this additional lookup at `labelToIndex` when `handleInvalid` is set to
@@ -423,7 +423,7 @@ class StringIndexerModel (
dataset
}

-for (i <- 0 until outputColNames.length) {
+for (i <- outputColNames.indices) {
val inputColName = inputColNames(i)
val outputColName = outputColNames(i)
val labelToIndex = labelsToIndexArray(i)

mllib/src/main/scala/org/apache/spark/ml/tuning/CrossValidator.scala (+2, -2)

@@ -410,7 +410,7 @@ object CrossValidatorModel extends MLReadable[CrossValidatorModel] {
val subModelsPath = new Path(path, "subModels")
for (splitIndex <- 0 until instance.getNumFolds) {
val splitPath = new Path(subModelsPath, s"fold${splitIndex.toString}")
-for (paramIndex <- 0 until instance.getEstimatorParamMaps.length) {
+for (paramIndex <- instance.getEstimatorParamMaps.indices) {
val modelPath = new Path(splitPath, paramIndex.toString).toString
instance.subModels(splitIndex)(paramIndex).asInstanceOf[MLWritable].save(modelPath)
}
@@ -442,7 +442,7 @@ object CrossValidatorModel extends MLReadable[CrossValidatorModel] {
Array.ofDim[Model[_]](estimatorParamMaps.length))
for (splitIndex <- 0 until numFolds) {
val splitPath = new Path(subModelsPath, s"fold${splitIndex.toString}")
-for (paramIndex <- 0 until estimatorParamMaps.length) {
+for (paramIndex <- estimatorParamMaps.indices) {
val modelPath = new Path(splitPath, paramIndex.toString).toString
_subModels(splitIndex)(paramIndex) =
DefaultParamsReader.loadParamsInstance(modelPath, sc)

0 commit comments
