Skip to contents

Flattens a contiguous range of dims into a tensor. For use with nn_sequential.

Usage

nn_flatten(start_dim = 2, end_dim = -1)

Arguments

start_dim

first dim to flatten (default = 2, so the batch dimension is preserved).

end_dim

last dim to flatten (default = -1).

Shape

  • Input: (*, S_start,..., S_i, ..., S_end, *), where S_i is the size at dimension i and * means any number of dimensions including none.

  • Output: (*, S_start*...*S_i*...*S_end, *).

See also

Examples

if (torch_is_installed()) {
  # Batch of 32 single-channel 5x5 inputs; flattening from dim 2
  # onward collapses the 1 x 5 x 5 trailing dims into 25 features.
  x <- torch_randn(32, 1, 5, 5)
  flatten <- nn_flatten()
  flatten(x)
}
#> torch_tensor
#> Columns 1 to 10
#> -0.4735 -0.6002 -1.0570  0.9925 -0.3509 -0.8637 -1.1725  0.9191 -0.1679  0.6476
#>  0.7705 -0.2972  1.0547 -0.1967 -0.3449 -1.6718  1.5897  0.0697  1.0726  2.1181
#> -2.4679  1.8160  1.3302 -0.5551  2.0044  0.0926  0.2648 -0.6859  0.5536  0.9213
#> -0.6150  0.7057 -0.4596 -0.5154 -1.8173 -0.6795 -1.3062  1.3666 -0.6238  0.2384
#>  0.3199 -1.2168 -0.2288 -0.1178  1.9832  0.8242  0.8394  1.0009  0.0199  0.9222
#>  2.4742 -0.9596  1.4961 -0.4285 -1.7235  0.7611  1.0576 -0.3124  0.1363 -2.7030
#> -0.0702 -0.0452  0.4570 -0.9273 -0.2648  0.4426  1.1174 -0.5110 -0.1325 -0.0104
#> -0.8217  0.0440 -2.1218  0.3801  1.3286  0.0445 -1.2826 -0.5898 -0.1526  1.0001
#> -1.6048  1.4222  1.7737  0.5557 -0.4818 -1.3665 -1.1690 -0.4399 -0.0249  0.6912
#> -1.0905  1.4149 -0.0520  0.9849  0.8969  0.1259 -0.7657  0.7992  0.5281 -1.3826
#>  2.6643  0.9733 -0.5598 -1.4982  0.3277  0.1026 -0.9441  1.2498 -0.6762  0.9815
#>  0.7346  0.8643 -0.2920  1.8119 -0.2410  1.1659  1.7623  1.7971 -0.0687  1.1715
#>  0.4472 -0.0911 -1.4262 -1.6933 -0.2441  2.0970 -0.6153  0.1728  0.3248  1.7091
#> -1.3463 -1.6264 -1.0195  0.4637 -1.4726 -0.1250  1.2856 -1.7700  0.5072  1.6598
#> -0.4438 -1.8380  0.0588 -2.4394  0.6764 -0.0639  1.8756 -0.3163 -0.2380 -0.1688
#> -0.2080  0.8897  1.8669  0.9608 -0.5573 -1.1845  0.3840 -0.0326  2.1312 -0.3968
#>  2.1574 -0.7087 -0.8721 -0.6567 -0.8175 -0.5640  0.5017 -0.0841  0.3146 -0.4290
#>  0.3642  0.4323  0.2177  0.1795 -0.1248 -0.5999  1.6046  1.4058  0.0670  0.8043
#> -0.0654  0.4150 -2.4448  1.3073 -0.0707  1.5836 -0.4615 -0.3473 -1.7631  2.0533
#> -0.3769 -0.2962  2.7815  0.6330  0.5950  0.6213  0.2558  0.3231  1.2050 -1.4661
#> -0.4006 -0.6690  1.5285  0.1289 -0.3681  0.9430  0.5462 -0.3209  1.4412 -0.0572
#> -0.1847  0.5141  0.5964  0.2506  0.9944  0.1737  0.0742  0.7166 -0.1379 -0.6825
#>  0.1165 -0.6864  0.5681 -0.3674 -0.4514  2.0482 -0.2907 -0.5915 -1.1095  1.5838
#>  0.7664  0.2617  0.7037 -1.3737 -1.5190 -0.0127  0.3907  0.1841 -1.5012  0.2350
#>  0.7989  0.1734  0.3294  0.3923 -0.7658  0.8407 -0.4390 -0.0492 -1.2816 -1.1690
#>  2.5119 -1.1566 -1.5160  0.1070  0.3979 -0.9003 -1.3626 -0.8913 -1.6753 -0.6730
#> -0.9750  0.4008  0.6589 -0.4370  0.4448  0.0790 -0.5225  0.1672 -1.4090  0.7346
#> -0.5181 -1.3630 -1.6718 -0.3247 -0.0511  0.9342  0.9192 -0.0532 -0.3866  1.9482
#> -0.0664 -0.1886  0.8888 -1.3655 -0.1244 -0.3151  0.2378  0.8967  1.1503 -0.3581
#>  2.3047 -0.0807 -1.2262  0.2142 -0.9276  0.5558  0.3456 -0.4092  0.4665 -1.0516
#> ... [the output was truncated (use n=-1 to disable)]
#> [ CPUFloatType{32,25} ]