nn_flatten

Flattens a contiguous range of dims into a tensor. For use with nn_sequential.

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start, ..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions, including none.

  • Output: (*, S_start*...*S_i*...*S_end, *).

See also

Examples

if (torch_is_installed()) {
input <- torch_randn(32, 1, 5, 5)
m <- nn_flatten()
m(input)
}
#> torch_tensor
#> Columns 1 to 10 2.1205  0.1764 -1.1185  1.4908  0.0675  0.1430  0.3876 -0.0264 -0.4768 -1.9254
#> -0.0662 -1.0738  0.8062 -1.4375 -0.1563  0.3227  0.8181  1.4323  0.8066  1.1003
#>  0.1203 -0.6147 -0.7396 -0.6877 -0.0891  1.7723  0.3971  0.5586  0.8161 -0.3200
#> -0.4236 -1.3482  0.8030 -0.6601 -0.2042 -0.6702  0.4295 -0.2491  0.2406  0.0795
#> -1.2986 -0.6640  0.3455 -1.2750  1.0363 -1.6282  0.2707 -1.8025  0.5816  0.1887
#>  0.1619 -1.6662 -0.1134 -0.4723  0.2148 -0.0350 -1.7973  0.2193 -1.4476  0.1984
#> -0.3393 -0.7063  1.7840 -1.0234 -0.1680 -1.4681  2.2215 -1.5743  0.5386 -0.2869
#> -0.4175 -0.0191 -1.6459  0.3617 -0.6276  0.8345 -0.3140  1.5679 -0.1850  0.9095
#> -0.6149  0.2689  0.7861 -0.7552 -0.5286 -0.1296 -0.7611 -0.2980  2.4315 -0.5199
#> -0.2809  1.7076 -0.8724 -1.5906 -0.2246 -0.0623 -0.0238 -0.5413  0.8237  0.9447
#>  1.3402  0.2777  0.9777  0.7984  1.1543  0.2002  1.3613 -0.4121 -0.0584  0.5139
#>  0.3949 -0.7180  1.9839  0.4141  0.1548 -0.8558  0.8587 -0.8543 -0.4872 -0.0127
#> -1.4312  0.4585 -0.2406 -1.2558 -1.1435  0.7387  0.0900 -0.9604 -0.3427 -0.1724
#> -0.7256 -1.1339  0.3102  0.1148  0.4590 -1.3529  1.6028 -0.8203  0.0534  0.5091
#>  0.2026  2.5613  1.2030  0.6421 -1.0772  0.8534  0.4044  0.9694 -1.0704 -0.4220
#> -0.5538  1.3006  0.6086  0.1968  0.5360  1.2468  0.2708  2.3125 -1.2432 -1.4737
#> -0.0343 -0.3293  1.3558 -0.2990  0.9121  0.2860 -1.2288  1.1209  1.2386 -0.3488
#>  1.8356 -1.4020 -0.5716  0.7050  1.0263  0.8023 -0.0984  0.7269 -1.4880  1.2703
#>  0.0681 -1.2027 -0.1414 -1.5548 -1.6418  1.5658  0.4886 -1.2226  0.7043 -0.2725
#> -0.1235 -0.6660  1.5808 -0.2727 -0.4695  0.3708  0.0755 -1.1404 -0.3976 -0.1596
#> -2.8002  0.3523 -0.2021  0.3069  0.3540 -1.0240 -0.0135  0.4921  0.0693 -1.6369
#>  0.3456 -1.1613  1.3161  1.3751  0.2347 -0.4085  0.6732 -0.9905 -1.1505 -1.0547
#> -0.4208  0.3196  1.6346  0.1581 -0.8840  1.0839 -0.2425  0.1714  1.8228 -1.5914
#>  1.3774  0.4704  0.1066  0.3947 -0.2107  0.5849 -1.1456  0.9875  0.1459 -0.9110
#>  1.2122 -1.8730  0.1594  0.2921 -0.9429  1.5635 -0.2959  2.7952  0.2528  0.5367
#>  0.3917  0.0082  0.4996  0.4497  1.6715 -0.5803 -0.1205 -0.3347  0.4885 -0.7229
#> -1.1825 -0.7332  0.4030 -1.4042 -0.3873 -0.6912  2.1909 -0.4736  0.6077  0.4223
#>  0.2844  1.5958  1.2335  1.6158  0.0297 -0.5428  1.6269 -0.6963 -0.3782 -0.7860
#> -0.3557 -0.8921  2.1667 -2.4464 -1.4080  0.4779 -1.0765 -0.4999 -0.7122  1.0441
#>  0.0325  2.5706  0.4669 -0.3966 -0.2274  1.9816  0.6138 -0.7751  0.0623 -1.3907
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]