nn_flatten

Flattens a contiguous range of dims into a tensor. For use with nn_sequential.

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start, ..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

  • Output: (*, S_start*...*S_i*...*S_end, *).

See also

Examples

if (torch_is_installed()) {
input <- torch_randn(32, 1, 5, 5)
m <- nn_flatten()
m(input)
}
#> torch_tensor
#> Columns 1 to 10 1.2495  0.6091 -1.0690 -0.0166  1.3938  1.1194 -0.2586 -1.0576 -0.0344 -0.3581
#>  0.1510 -0.1334  0.0167  0.6700 -1.3643 -1.5167 -0.3844  0.3124  0.0402  0.5480
#> -0.7318  0.4417  1.1984 -0.4938  0.0066 -0.5747  0.5220  0.5978 -0.1260 -0.1938
#> -0.7691 -0.1260  0.9954  0.5201 -0.4468 -0.3587 -1.7018 -0.2594  0.4514  0.1578
#>  0.6013 -0.5151 -0.7974 -0.6856  0.0539 -0.1893 -1.2339 -0.1484  0.2091  0.0587
#>  0.7387 -1.4977  2.1779  0.3937  0.9652  1.4255  0.1442 -0.4729  0.9483 -0.2403
#>  0.9112 -0.5716  0.1186 -0.3095 -0.6064  0.8350 -0.4593  2.5011  3.2466  0.1807
#>  1.3179 -0.1808  1.4018  0.3339  0.5278 -1.3528 -1.1230 -1.0513  1.5723 -0.3694
#>  0.1420 -1.3680  2.2601  1.3438 -0.0785 -0.0915 -0.6176 -0.5161 -1.6451  2.9431
#>  0.6651  0.4910 -1.3762 -0.1692  0.2553 -1.5726  0.6142  0.4483 -0.0748 -1.1440
#>  0.5375  0.5055 -0.3455  0.6575  0.1764  0.1094  0.2279  0.7054  1.4783 -0.1029
#> -1.1088 -0.0997 -0.0775  1.1598 -0.4733 -0.2787  1.0861  0.7385  0.9654 -2.1507
#>  0.7516 -1.6256  2.3867 -1.9732 -1.6940 -1.3743  1.1863 -0.6358 -0.5382 -0.7840
#>  1.7041 -1.4607  1.3680 -1.5216 -1.2815 -0.1860  1.6848  0.1199  0.2912  0.3143
#>  1.0183 -0.1036 -0.4311 -0.8696 -0.0262 -1.6376 -0.3012  0.1543 -0.7354 -0.2900
#> -1.7337 -1.2012  1.1882  1.8100  3.2659 -0.2314  0.5813 -1.1660 -1.1861 -0.8647
#>  0.4613  1.2493 -0.5178 -1.3412  0.9709  0.4420 -0.3324 -1.8909 -1.1610  0.9463
#> -2.6261 -0.5444 -0.5643 -1.0955 -1.1087  0.8311  0.1976 -1.6208  0.1859  1.8698
#> -1.5762 -1.5527  1.2426  0.5504  0.0972  0.9758 -0.8191  0.3776  0.1480  0.2924
#> -0.2763  0.0554 -0.8556  1.8417  1.3189 -1.1393 -0.4279 -1.8547  0.1733  0.8090
#> -0.1023 -1.0378 -0.2691  0.7317  0.7775 -0.0868 -1.0975  0.5162  2.0013 -0.4643
#> -1.2354 -0.8254  0.0219  1.2743 -0.2680  1.1962  1.7911  1.4018  0.3904 -1.4323
#> -0.1765  1.0818  0.5222 -0.1679  0.3774 -1.0082  0.7265 -0.2452  0.5839 -1.6902
#>  1.7448 -0.5870 -0.6490  0.6485 -0.4086 -0.2505  0.8305  0.2145 -1.8956 -0.2285
#>  0.1909  0.8784  2.0803  1.8147 -0.6791  0.9736  1.1833 -0.3603  1.0244 -1.6594
#> -0.9989  2.0104  1.4421 -0.3478 -1.0176  0.7541  1.3366 -0.0143 -0.2454  1.7676
#>  1.3441  0.0959 -0.3711  0.0810 -1.2710  0.4426 -0.4177  0.3265 -0.2810 -0.7111
#> -0.0904 -1.4582  0.1510 -1.0543  0.4626  0.2896  0.3799  0.4717  1.0398  1.6394
#>  1.2291 -0.2220  0.4246  0.3083 -0.5979  1.0825 -0.3903  0.6505  0.1277 -0.2582
#>  0.1294 -0.9958  1.0129 -1.2177 -0.4816  0.9413  0.1447 -0.2653  0.1235  0.4562
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]